namespace xlib {
namespace detail {
__device__ __forceinline__
constexpr int min_max_lane(int STEP) {
const int MASK_WARP = (1 << (STEP + 1)) - 1;
return ((31 - MASK_WARP) << 8) | MASK_WARP;
}
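// Worked values (plain arithmetic from the expression above): the low byte is
// the lane mask for the current step and the high byte its complement,
// presumably packed as the c-operand of the PTX shuffles used by the
// WARP_REDUCE_* macros below.
//   min_max_lane(0) == ((31 -  1) << 8) |  1 == 0x1E01
//   min_max_lane(1) == ((31 -  3) << 8) |  3 == 0x1C03
//   min_max_lane(4) == ((31 - 31) << 8) | 31 == 0x001F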
//==============================================================================
template<unsigned WARP_SZ, typename T>
struct WarpReduceHelper {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(T& value) {
#pragma unroll
for (int STEP = 1; STEP <= WARP_SZ / 2; STEP = STEP * 2)
value += xlib::shfl_xor(member_mask, value, STEP);
}
__device__ __forceinline__
static void min(T& value) {
#pragma unroll
for (int STEP = 1; STEP <= WARP_SZ / 2; STEP = STEP * 2) {
auto tmp = xlib::shfl_xor(member_mask, value, STEP);
value = tmp < value ? tmp : value;
}
}
__device__ __forceinline__
static void max(T& value) {
#pragma unroll
for (int STEP = 1; STEP <= WARP_SZ / 2; STEP = STEP * 2) {
auto tmp = xlib::shfl_xor(member_mask, value, STEP);
value = tmp > value ? tmp : value;
}
}
template<typename Lambda>
__device__ __forceinline__
static void apply(T& value, const Lambda& lambda) {
#pragma unroll
for (int STEP = 1; STEP <= WARP_SZ / 2; STEP = STEP * 2) {
auto tmp = xlib::shfl_xor(member_mask, value, STEP);
value = lambda(value, tmp);
}
}
};
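// Example (illustrative values): WARP_SZ = 4, add() with lane values a, b, c, d.
//   STEP = 1: each lane adds its xor-1 partner -> lanes hold a+b, a+b, c+d, c+d
//   STEP = 2: each lane adds its xor-2 partner -> every lane holds a+b+c+d
// After the loop all WARP_SZ lanes hold the full reduction result.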
//------------------------------------------------------------------------------
template<unsigned WARP_SZ>
struct WarpReduceHelper<WARP_SZ, int> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(int& value) {
WARP_REDUCE_32BIT(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void min(int& value) {
WARP_REDUCE_32BIT(min, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void max(int& value) {
WARP_REDUCE_32BIT(max, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[2]) {
WARP_REDUCE_GEN2(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[3]) {
WARP_REDUCE_GEN3(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[4]) {
WARP_REDUCE_GEN4(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[5]) {
WARP_REDUCE_GEN5(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[6]) {
WARP_REDUCE_GEN6(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[7]) {
WARP_REDUCE_GEN7(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[8]) {
WARP_REDUCE_GEN8(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[9]) {
WARP_REDUCE_GEN9(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[10]) {
WARP_REDUCE_GEN10(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[11]) {
WARP_REDUCE_GEN11(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[12]) {
WARP_REDUCE_GEN12(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[13]) {
WARP_REDUCE_GEN13(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[14]) {
WARP_REDUCE_GEN14(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[15]) {
WARP_REDUCE_GEN15(add, s32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(int (&value)[16]) {
WARP_REDUCE_GEN16(add, s32, r, min_max_lane(STEP))
}
};
//------------------------------------------------------------------------------
template<unsigned WARP_SZ>
struct WarpReduceHelper<WARP_SZ, unsigned> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(unsigned& value) {
WARP_REDUCE_32BIT(add, u32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void min(unsigned& value) {
WARP_REDUCE_32BIT(min, u32, r, min_max_lane(STEP))
}
__device__ __forceinline__
static void max(unsigned& value) {
WARP_REDUCE_32BIT(max, u32, r, min_max_lane(STEP))
}
};
//------------------------------------------------------------------------------
template<unsigned WARP_SZ>
struct WarpReduceHelper<WARP_SZ, float> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(float& value) {
WARP_REDUCE_32BIT(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void min(float& value) {
WARP_REDUCE_32BIT(min, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void max(float& value) {
WARP_REDUCE_32BIT(max, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[2]) {
WARP_REDUCE_GEN2(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[3]) {
WARP_REDUCE_GEN3(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[4]) {
WARP_REDUCE_GEN4(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[5]) {
WARP_REDUCE_GEN5(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[6]) {
WARP_REDUCE_GEN6(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[7]) {
WARP_REDUCE_GEN7(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[8]) {
WARP_REDUCE_GEN8(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[9]) {
WARP_REDUCE_GEN9(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[10]) {
WARP_REDUCE_GEN10(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[11]) {
WARP_REDUCE_GEN11(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[12]) {
WARP_REDUCE_GEN12(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[13]) {
WARP_REDUCE_GEN13(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[14]) {
WARP_REDUCE_GEN14(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[15]) {
WARP_REDUCE_GEN15(add, f32, f, min_max_lane(STEP))
}
__device__ __forceinline__
static void add(float (&value)[16]) {
WARP_REDUCE_GEN16(add, f32, f, min_max_lane(STEP))
}
};
//------------------------------------------------------------------------------
template<unsigned WARP_SZ>
struct WarpReduceHelper<WARP_SZ, double> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(double& value) {
WARP_REDUCE_64BIT(add, f64, d, min_max_lane(STEP))
}
__device__ __forceinline__
static void min(double& value) {
WARP_REDUCE_64BIT(min, f64, d, min_max_lane(STEP))
}
__device__ __forceinline__
static void max(double& value) {
WARP_REDUCE_64BIT(max, f64, d, min_max_lane(STEP))
}
};
//------------------------------------------------------------------------------
template<unsigned WARP_SZ>
struct WarpReduceHelper<WARP_SZ, int64_t> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(int64_t& value) {
WARP_REDUCE_64BIT(add, s64, l, min_max_lane(STEP))
}
__device__ __forceinline__
static void min(int64_t& value) {
WARP_REDUCE_64BIT(min, s64, l, min_max_lane(STEP))
}
__device__ __forceinline__
static void max(int64_t& value) {
WARP_REDUCE_64BIT(max, s64, l, min_max_lane(STEP))
}
};
//------------------------------------------------------------------------------
template<unsigned WARP_SZ>
struct WarpReduceHelper<WARP_SZ, uint64_t> {
static const unsigned member_mask = xlib::member_mask<WARP_SZ>();
__device__ __forceinline__
static void add(uint64_t& value) {
WARP_REDUCE_64BIT(add, u64, l, min_max_lane(STEP))
}
__device__ __forceinline__
static void min(uint64_t& value) {
WARP_REDUCE_64BIT(min, u64, l, min_max_lane(STEP))
}
__device__ __forceinline__
static void max(uint64_t& value) {
WARP_REDUCE_64BIT(max, u64, l, min_max_lane(STEP))
}
};
#undef WARP_REDUCE_32BIT
#undef WARP_REDUCE_64BIT
#undef WARP_REDUCE_GEN1
#undef WARP_REDUCE_GEN2
#undef WARP_REDUCE_GEN3
#undef WARP_REDUCE_GEN4
#undef WARP_REDUCE_GEN5
#undef WARP_REDUCE_GEN6
#undef WARP_REDUCE_GEN7
#undef WARP_REDUCE_GEN8
#undef WARP_REDUCE_GEN9
#undef WARP_REDUCE_GEN10
#undef WARP_REDUCE_GEN11
#undef WARP_REDUCE_GEN12
#undef WARP_REDUCE_GEN13
#undef WARP_REDUCE_GEN14
#undef WARP_REDUCE_GEN15
#undef WARP_REDUCE_GEN16
} // namespace detail
//==============================================================================
//==============================================================================
template<int VW_SIZE>
template<typename T>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::add(T& value) {
detail::WarpReduceHelper<VW_SIZE, T>::add(value);
}
template<int VW_SIZE>
template<typename T>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::min(T& value) {
detail::WarpReduceHelper<VW_SIZE, T>::min(value);
}
template<int VW_SIZE>
template<typename T>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::max(T& value) {
detail::WarpReduceHelper<VW_SIZE, T>::max(value);
}
template<int VW_SIZE>
template<typename T, int SIZE>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::add(T (&value)[SIZE]) {
detail::WarpReduceHelper<VW_SIZE, T>::add(value);
}
//==============================================================================
template<int VW_SIZE>
template<typename T>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::addAll(T& value) {
const unsigned member_mask = xlib::member_mask<VW_SIZE>();
detail::WarpReduceHelper<VW_SIZE, T>::add(value);
value = xlib::shfl(member_mask, value, 0, VW_SIZE);
}
template<int VW_SIZE>
template<typename T>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::minAll(T& value) {
const unsigned member_mask = xlib::member_mask<VW_SIZE>();
detail::WarpReduceHelper<VW_SIZE, T>::min(value);
value = xlib::shfl(member_mask, value, 0, VW_SIZE);
}
template<int VW_SIZE>
template<typename T>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::maxAll(T& value) {
const unsigned member_mask = xlib::member_mask<VW_SIZE>();
detail::WarpReduceHelper<VW_SIZE, T>::max(value);
value = xlib::shfl(member_mask, value, 0, VW_SIZE);
}
//==============================================================================
template<int VW_SIZE>
template<typename T, typename R>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::add(T& value, R* pointer) {
detail::WarpReduceHelper<VW_SIZE, T>::add(value);
if (lane_id<VW_SIZE>() == 0)
*pointer = value;
}
template<int VW_SIZE>
template<typename T, typename R>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::min(T& value, R* pointer) {
detail::WarpReduceHelper<VW_SIZE, T>::min(value);
if (lane_id<VW_SIZE>() == 0)
*pointer = value;
}
template<int VW_SIZE>
template<typename T, typename R>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::max(T& value, R* pointer) {
detail::WarpReduceHelper<VW_SIZE, T>::max(value);
if (lane_id<VW_SIZE>() == 0)
*pointer = value;
}
//==============================================================================
template<int VW_SIZE>
template<typename T, typename R>
__device__ __forceinline__
T WarpReduce<VW_SIZE>::atomicAdd(const T& value, R* pointer) {
const unsigned member_mask = xlib::member_mask<VW_SIZE>();
T old, value_tmp = value;
detail::WarpReduceHelper<VW_SIZE, T>::add(value_tmp);
if (lane_id<VW_SIZE>() == 0)
old = atomic::add(value_tmp, pointer);
return xlib::shfl(member_mask, old, 0, VW_SIZE);
}
template<int VW_SIZE>
template<typename T, typename R>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::atomicMin(const T& value, R* pointer) {
T value_tmp = value;
detail::WarpReduceHelper<VW_SIZE, T>::min(value_tmp);
if (lane_id<VW_SIZE>() == 0)
atomic::min(value_tmp, pointer);
}
template<int VW_SIZE>
template<typename T, typename R>
__device__ __forceinline__
void WarpReduce<VW_SIZE>::atomicMax(const T& value, R* pointer) {
T value_tmp = value;
detail::WarpReduceHelper<VW_SIZE, T>::max(value_tmp);
if (lane_id<VW_SIZE>() == 0)
atomic::max(value_tmp, pointer);
}
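// Usage sketch (illustrative only, not part of this header), assuming the
// WarpReduce front-end declared in the corresponding header is in scope:
//
//   __global__ void warp_sum(const int* in, int* out) {
//       int tid = blockIdx.x * blockDim.x + threadIdx.x;
//       int v   = in[tid];
//       // butterfly-reduce across the warp; lane 0 stores the warp total
//       xlib::WarpReduce<32>::add(v, out + tid / 32);
//   }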
} // namespace xlib
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudev.hpp"
using namespace cv::cudev;
void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const GpuMat& mask, double scale, Stream& stream, int);
namespace
{
template <typename T, int cn> struct SafeDiv;
template <typename T> struct SafeDiv<T, 1>
{
__device__ __forceinline__ static T op(T a, T b)
{
return b != 0 ? a / b : 0;
}
};
template <typename T> struct SafeDiv<T, 2>
{
__device__ __forceinline__ static T op(const T& a, const T& b)
{
T res;
res.x = b.x != 0 ? a.x / b.x : 0;
res.y = b.y != 0 ? a.y / b.y : 0;
return res;
}
};
template <typename T> struct SafeDiv<T, 3>
{
__device__ __forceinline__ static T op(const T& a, const T& b)
{
T res;
res.x = b.x != 0 ? a.x / b.x : 0;
res.y = b.y != 0 ? a.y / b.y : 0;
res.z = b.z != 0 ? a.z / b.z : 0;
return res;
}
};
template <typename T> struct SafeDiv<T, 4>
{
__device__ __forceinline__ static T op(const T& a, const T& b)
{
T res;
res.x = b.x != 0 ? a.x / b.x : 0;
res.y = b.y != 0 ? a.y / b.y : 0;
res.z = b.z != 0 ? a.z / b.z : 0;
res.w = b.w != 0 ? a.w / b.w : 0;
return res;
}
};
template <typename SrcType, typename ScalarType, typename DstType> struct DivScalarOp : unary_function<SrcType, DstType>
{
ScalarType val;
__device__ __forceinline__ DstType operator ()(SrcType a) const
{
return saturate_cast<DstType>(SafeDiv<ScalarType, VecTraits<ScalarType>::cn>::op(saturate_cast<ScalarType>(a), val));
}
};
template <typename SrcType, typename ScalarType, typename DstType> struct DivScalarOpInv : unary_function<SrcType, DstType>
{
ScalarType val;
__device__ __forceinline__ DstType operator ()(SrcType a) const
{
return saturate_cast<DstType>(SafeDiv<ScalarType, VecTraits<ScalarType>::cn>::op(val, saturate_cast<ScalarType>(a)));
}
};
template <typename ScalarDepth> struct TransformPolicy : DefaultTransformPolicy
{
};
template <> struct TransformPolicy<double> : DefaultTransformPolicy
{
enum {
shift = 1
};
};
template <typename SrcType, typename ScalarDepth, typename DstType>
void divScalarImpl(const GpuMat& src, cv::Scalar value, bool inv, GpuMat& dst, Stream& stream)
{
typedef typename MakeVec<ScalarDepth, VecTraits<SrcType>::cn>::type ScalarType;
cv::Scalar_<ScalarDepth> value_ = value;
if (inv)
{
DivScalarOpInv<SrcType, ScalarType, DstType> op;
op.val = VecTraits<ScalarType>::make(value_.val);
gridTransformUnary_< TransformPolicy<ScalarDepth> >(globPtr<SrcType>(src), globPtr<DstType>(dst), op, stream);
}
else
{
DivScalarOp<SrcType, ScalarType, DstType> op;
op.val = VecTraits<ScalarType>::make(value_.val);
gridTransformUnary_< TransformPolicy<ScalarDepth> >(globPtr<SrcType>(src), globPtr<DstType>(dst), op, stream);
}
}
}
void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const GpuMat&, double scale, Stream& stream, int)
{
typedef void (*func_t)(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, Stream& stream);
static const func_t funcs[7][7][4] =
{
{
{divScalarImpl<uchar, float, uchar>, divScalarImpl<uchar2, float, uchar2>, divScalarImpl<uchar3, float, uchar3>, divScalarImpl<uchar4, float, uchar4>},
{divScalarImpl<uchar, float, schar>, divScalarImpl<uchar2, float, char2>, divScalarImpl<uchar3, float, char3>, divScalarImpl<uchar4, float, char4>},
{divScalarImpl<uchar, float, ushort>, divScalarImpl<uchar2, float, ushort2>, divScalarImpl<uchar3, float, ushort3>, divScalarImpl<uchar4, float, ushort4>},
{divScalarImpl<uchar, float, short>, divScalarImpl<uchar2, float, short2>, divScalarImpl<uchar3, float, short3>, divScalarImpl<uchar4, float, short4>},
{divScalarImpl<uchar, float, int>, divScalarImpl<uchar2, float, int2>, divScalarImpl<uchar3, float, int3>, divScalarImpl<uchar4, float, int4>},
{divScalarImpl<uchar, float, float>, divScalarImpl<uchar2, float, float2>, divScalarImpl<uchar3, float, float3>, divScalarImpl<uchar4, float, float4>},
{divScalarImpl<uchar, double, double>, divScalarImpl<uchar2, double, double2>, divScalarImpl<uchar3, double, double3>, divScalarImpl<uchar4, double, double4>}
},
{
{divScalarImpl<schar, float, uchar>, divScalarImpl<char2, float, uchar2>, divScalarImpl<char3, float, uchar3>, divScalarImpl<char4, float, uchar4>},
{divScalarImpl<schar, float, schar>, divScalarImpl<char2, float, char2>, divScalarImpl<char3, float, char3>, divScalarImpl<char4, float, char4>},
{divScalarImpl<schar, float, ushort>, divScalarImpl<char2, float, ushort2>, divScalarImpl<char3, float, ushort3>, divScalarImpl<char4, float, ushort4>},
{divScalarImpl<schar, float, short>, divScalarImpl<char2, float, short2>, divScalarImpl<char3, float, short3>, divScalarImpl<char4, float, short4>},
{divScalarImpl<schar, float, int>, divScalarImpl<char2, float, int2>, divScalarImpl<char3, float, int3>, divScalarImpl<char4, float, int4>},
{divScalarImpl<schar, float, float>, divScalarImpl<char2, float, float2>, divScalarImpl<char3, float, float3>, divScalarImpl<char4, float, float4>},
{divScalarImpl<schar, double, double>, divScalarImpl<char2, double, double2>, divScalarImpl<char3, double, double3>, divScalarImpl<char4, double, double4>}
},
{
{0 /*divScalarImpl<ushort, float, uchar>*/, 0 /*divScalarImpl<ushort2, float, uchar2>*/, 0 /*divScalarImpl<ushort3, float, uchar3>*/, 0 /*divScalarImpl<ushort4, float, uchar4>*/},
{0 /*divScalarImpl<ushort, float, schar>*/, 0 /*divScalarImpl<ushort2, float, char2>*/, 0 /*divScalarImpl<ushort3, float, char3>*/, 0 /*divScalarImpl<ushort4, float, char4>*/},
{divScalarImpl<ushort, float, ushort>, divScalarImpl<ushort2, float, ushort2>, divScalarImpl<ushort3, float, ushort3>, divScalarImpl<ushort4, float, ushort4>},
{divScalarImpl<ushort, float, short>, divScalarImpl<ushort2, float, short2>, divScalarImpl<ushort3, float, short3>, divScalarImpl<ushort4, float, short4>},
{divScalarImpl<ushort, float, int>, divScalarImpl<ushort2, float, int2>, divScalarImpl<ushort3, float, int3>, divScalarImpl<ushort4, float, int4>},
{divScalarImpl<ushort, float, float>, divScalarImpl<ushort2, float, float2>, divScalarImpl<ushort3, float, float3>, divScalarImpl<ushort4, float, float4>},
{divScalarImpl<ushort, double, double>, divScalarImpl<ushort2, double, double2>, divScalarImpl<ushort3, double, double3>, divScalarImpl<ushort4, double, double4>}
},
{
{0 /*divScalarImpl<short, float, uchar>*/, 0 /*divScalarImpl<short2, float, uchar2>*/, 0 /*divScalarImpl<short3, float, uchar3>*/, 0 /*divScalarImpl<short4, float, uchar4>*/},
{0 /*divScalarImpl<short, float, schar>*/, 0 /*divScalarImpl<short2, float, char2>*/, 0 /*divScalarImpl<short3, float, char3>*/, 0 /*divScalarImpl<short4, float, char4>*/},
{divScalarImpl<short, float, ushort>, divScalarImpl<short2, float, ushort2>, divScalarImpl<short3, float, ushort3>, divScalarImpl<short4, float, ushort4>},
{divScalarImpl<short, float, short>, divScalarImpl<short2, float, short2>, divScalarImpl<short3, float, short3>, divScalarImpl<short4, float, short4>},
{divScalarImpl<short, float, int>, divScalarImpl<short2, float, int2>, divScalarImpl<short3, float, int3>, divScalarImpl<short4, float, int4>},
{divScalarImpl<short, float, float>, divScalarImpl<short2, float, float2>, divScalarImpl<short3, float, float3>, divScalarImpl<short4, float, float4>},
{divScalarImpl<short, double, double>, divScalarImpl<short2, double, double2>, divScalarImpl<short3, double, double3>, divScalarImpl<short4, double, double4>}
},
{
{0 /*divScalarImpl<int, float, uchar>*/, 0 /*divScalarImpl<int2, float, uchar2>*/, 0 /*divScalarImpl<int3, float, uchar3>*/, 0 /*divScalarImpl<int4, float, uchar4>*/},
{0 /*divScalarImpl<int, float, schar>*/, 0 /*divScalarImpl<int2, float, char2>*/, 0 /*divScalarImpl<int3, float, char3>*/, 0 /*divScalarImpl<int4, float, char4>*/},
{0 /*divScalarImpl<int, float, ushort>*/, 0 /*divScalarImpl<int2, float, ushort2>*/, 0 /*divScalarImpl<int3, float, ushort3>*/, 0 /*divScalarImpl<int4, float, ushort4>*/},
{0 /*divScalarImpl<int, float, short>*/, 0 /*divScalarImpl<int2, float, short2>*/, 0 /*divScalarImpl<int3, float, short3>*/, 0 /*divScalarImpl<int4, float, short4>*/},
{divScalarImpl<int, float, int>, divScalarImpl<int2, float, int2>, divScalarImpl<int3, float, int3>, divScalarImpl<int4, float, int4>},
{divScalarImpl<int, float, float>, divScalarImpl<int2, float, float2>, divScalarImpl<int3, float, float3>, divScalarImpl<int4, float, float4>},
{divScalarImpl<int, double, double>, divScalarImpl<int2, double, double2>, divScalarImpl<int3, double, double3>, divScalarImpl<int4, double, double4>}
},
{
{0 /*divScalarImpl<float, float, uchar>*/, 0 /*divScalarImpl<float2, float, uchar2>*/, 0 /*divScalarImpl<float3, float, uchar3>*/, 0 /*divScalarImpl<float4, float, uchar4>*/},
{0 /*divScalarImpl<float, float, schar>*/, 0 /*divScalarImpl<float2, float, char2>*/, 0 /*divScalarImpl<float3, float, char3>*/, 0 /*divScalarImpl<float4, float, char4>*/},
{0 /*divScalarImpl<float, float, ushort>*/, 0 /*divScalarImpl<float2, float, ushort2>*/, 0 /*divScalarImpl<float3, float, ushort3>*/, 0 /*divScalarImpl<float4, float, ushort4>*/},
{0 /*divScalarImpl<float, float, short>*/, 0 /*divScalarImpl<float2, float, short2>*/, 0 /*divScalarImpl<float3, float, short3>*/, 0 /*divScalarImpl<float4, float, short4>*/},
{0 /*divScalarImpl<float, float, int>*/, 0 /*divScalarImpl<float2, float, int2>*/, 0 /*divScalarImpl<float3, float, int3>*/, 0 /*divScalarImpl<float4, float, int4>*/},
{divScalarImpl<float, float, float>, divScalarImpl<float2, float, float2>, divScalarImpl<float3, float, float3>, divScalarImpl<float4, float, float4>},
{divScalarImpl<float, double, double>, divScalarImpl<float2, double, double2>, divScalarImpl<float3, double, double3>, divScalarImpl<float4, double, double4>}
},
{
{0 /*divScalarImpl<double, double, uchar>*/, 0 /*divScalarImpl<double2, double, uchar2>*/, 0 /*divScalarImpl<double3, double, uchar3>*/, 0 /*divScalarImpl<double4, double, uchar4>*/},
{0 /*divScalarImpl<double, double, schar>*/, 0 /*divScalarImpl<double2, double, char2>*/, 0 /*divScalarImpl<double3, double, char3>*/, 0 /*divScalarImpl<double4, double, char4>*/},
{0 /*divScalarImpl<double, double, ushort>*/, 0 /*divScalarImpl<double2, double, ushort2>*/, 0 /*divScalarImpl<double3, double, ushort3>*/, 0 /*divScalarImpl<double4, double, ushort4>*/},
{0 /*divScalarImpl<double, double, short>*/, 0 /*divScalarImpl<double2, double, short2>*/, 0 /*divScalarImpl<double3, double, short3>*/, 0 /*divScalarImpl<double4, double, short4>*/},
{0 /*divScalarImpl<double, double, int>*/, 0 /*divScalarImpl<double2, double, int2>*/, 0 /*divScalarImpl<double3, double, int3>*/, 0 /*divScalarImpl<double4, double, int4>*/},
{0 /*divScalarImpl<double, double, float>*/, 0 /*divScalarImpl<double2, double, float2>*/, 0 /*divScalarImpl<double3, double, float3>*/, 0 /*divScalarImpl<double4, double, float4>*/},
{divScalarImpl<double, double, double>, divScalarImpl<double2, double, double2>, divScalarImpl<double3, double, double3>, divScalarImpl<double4, double, double4>}
}
};
const int sdepth = src.depth();
const int ddepth = dst.depth();
const int cn = src.channels();
CV_DbgAssert( sdepth <= CV_64F && ddepth <= CV_64F && cn <= 4 );
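// Fold the scale factor into the scalar so the per-pixel kernel performs a
// single division: dst = scale*src/val == src/(val/scale) in the forward case,
// and dst = scale*val/src == (val*scale)/src in the inverse case.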
if (inv)
{
val[0] *= scale;
val[1] *= scale;
val[2] *= scale;
val[3] *= scale;
}
else
{
val[0] /= scale;
val[1] /= scale;
val[2] /= scale;
val[3] /= scale;
}
const func_t func = funcs[sdepth][ddepth][cn - 1];
if (!func)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types");
func(src, val, inv, dst, stream);
}
#endif
#pragma once
#ifndef INCLUDED_CURE_BLEND_SHADERS
#define INCLUDED_CURE_BLEND_SHADERS
#include <math/vector.h>
#include <math/matrix.h>
struct NoBlending
{
__device__
math::float4 operator ()(const math::float4& src) const
{
return src;
}
};
struct AlphaBlending
{
__device__
math::float4 operator ()(const math::float4& src, const math::float4& dest) const
{
return src.w * src + (1.0f - src.w) * dest;
}
};
struct TextureBlending
{
__device__
math::float4 operator ()(const math::float4& src, const math::float4& dest) const
{
return src + (1.0f - src.w) * dest;
}
};
struct ClipspaceBlending
{
__device__
math::float4 operator ()(const math::float4& src, const math::float4& dest) const
{
//return 0.25f * src + 0.75f * dest;
return src + dest;
}
};
struct EyeCandyBlending
{
__device__
math::float4 operator ()(const math::float4& src, const math::float4& dest) const
{
return 0.25f * src + 0.75f * dest;
}
};
template <typename T>
class SeparableBlendOp : T
{
public:
__device__
math::float4 operator ()(const math::float4& src, const math::float4& dest) const
{
return { T::operator ()(dest.x, src.x), T::operator ()(dest.y, src.y), T::operator ()(dest.z, src.z), src.w };
}
};
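// Example (illustrative): SeparableBlendOp lifts a per-channel op to full-colour
// blending, feeding it (backdrop, source) per channel and keeping the source
// alpha, e.g.
//   using MultiplyBlending = SeparableBlendOp<Multiply>;
//   // MultiplyBlending()(src, dest) multiplies each RGB channel of src and dest.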
struct Normal
{
__device__ float operator ()(float c_b, float c_s) const
{
return c_s;
}
};
struct FiftyFifty
{
__device__ float operator ()(float c_b, float c_s) const
{
return 0.5f * c_b + 0.5f * c_s;
}
};
struct Multiply
{
__device__ float operator ()(float c_b, float c_s) const
{
return c_b * c_s;
}
};
struct Screen
{
__device__ float operator ()(float c_b, float c_s) const
{
return c_b + c_s - (c_b * c_s);
}
};
struct Darken
{
__device__ float operator ()(float c_b, float c_s) const
{
return min(c_b, c_s);
}
};
struct Lighten
{
__device__ float operator ()(float c_b, float c_s) const
{
return max(c_b, c_s);
}
};
struct ColorDodge
{
__device__ float operator ()(float c_b, float c_s) const
{
if (c_s < 1.0f)
return min(1.0f, c_b / (1.0f - c_s));
return 1.0f;
}
};
struct ColorBurn
{
__device__ float operator ()(float c_b, float c_s) const
{
if (c_s > 0.0f)
return 1.0f - min(1.0f, (1.0f - c_b) / c_s);
return 0.0f;
}
};
struct HardLight
{
__device__ float operator ()(float c_b, float c_s) const
{
if (c_s <= 0.5f)
return Multiply()(c_b, 2.0f * c_s);
return Screen()(c_b, 2.0f * c_s - 1.0f);
}
};
class SoftLight
{
__device__
static float D(float x)
{
if (x <= 0.25)
return ((16 * x - 12) * x + 4) * x;
return sqrt(x);
}
public:
__device__ float operator ()(float c_b, float c_s) const
{
if (c_s <= 0.5f)
return c_b - (1.0f - 2.0f * c_s) * c_b * (1 - c_b);
return c_b + (2.0f * c_s - 1.0f) * (D(c_b) - c_b);
}
};
struct Overlay
{
__device__ float operator ()(float c_b, float c_s) const
{
return HardLight()(c_s, c_b);
}
};
struct Difference
{
__device__ float operator ()(float c_b, float c_s) const
{
return abs(c_b - c_s);
}
};
struct Exclusion
{
__device__ float operator ()(float c_b, float c_s) const
{
return c_b + c_s - 2.0f * c_b * c_s;
}
};
//class NonSeparableBlendOp
//{
//protected:
// static __device__ float lum(const math::float3& c)
// {
// return 0.3f * c.x + 0.59f * c.y + 0.11f * c.z;
// }
//
// static __device__ float sat(const math::float3& c)
// {
// return max(c.x, max(c.y, c.z)) - min(c.x, min(c.y, c.z));
// }
//
// static __device__ math::float3 clipColor(const math::float3& c)
// {
// float l = lum(c);
// float n = min(c.x, min(c.y, c.z));
// float x = max(c.x, min(c.y, c.z));
//
// if (n < 0.0f)
// return {
// l + (((c.x - l) * l) / (l - n)),
// l + (((c.y - l) * l) / (l - n)),
// l + (((c.z - l) * l) / (l - n))
// };
// if (x > 1.0f)
// return {
// l + (((c.x - l) * (1.0f - l)) / (x - l)),
// l + (((c.y - l) * (1.0f - l)) / (x - l)),
// l + (((c.z - l) * (1.0f - l)) / (x - l))
// };
// return c;
// }
//
// static __device__ math::float3 setLum(const math::float3& c, float l)
// {
// float d = l - lum(c);
// return clipColor({c.x + d, c.y + d, c.z + d});
// }
//
// static __device__ math::float3 setSat(const math::float3& c, float l)
// {
// (c.x < c.y) && (c.x < c.z)
//
// int min = 0U;
//
// if (c.x < c.y)
//
//
// if (c_max < c_mid)
// swap(c_mid, c_max);
//
// if (c_mid < c_min)
// swap(c_min, c_mid);
//
// if (c_max > c_min)
//
// }
//};
//
//class BlendingHue : NonSeparableBlendOp
//{
//public:
// __device__
// math::float4 operator ()(const math::float4& src, const math::float4& dest) const
// {
// return{ T::operator ()(dest.x, src.x), T::operator ()(dest.y, src.y), T::operator ()(dest.z, src.z), src.w };
// }
//};
struct WaterBlending
{
__device__
math::float4 operator ()(const math::float4& src, const math::float4& dest) const
{
return 0.5f * src + 0.5f * dest;
}
};
__device__
static math::float3 opposite_color(const math::float3& src, const math::float3& grey_src, const math::float3& comp, const math::float3& grey_comp)
{
math::float3 vec = normalize(grey_src - src);
math::float3 comp_vec = comp - grey_comp;
float vec_len = sqrt(dot(comp_vec, comp_vec));
return grey_comp + vec_len * vec;
}
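// Grey point of a colour: convert to CIE XYZ (sRGB/D65 matrix), keep the
// luminance Y, force the chromaticity to the D65 white point (Xn, Zn), and
// convert back to RGB, giving the achromatic colour with the same luminance
// as val.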
__device__
static math::float3 greypoint(const math::float3& val)
{
const math::float3x3 toXYZ =
{
0.4124f, 0.3576f, 0.1805f,
0.2126f, 0.7152f, 0.0722f,
0.0193f, 0.1192f, 0.9505f
};
math::float3 xyz = (toXYZ * val);
float Xn = 0.95047f;
float Zn = 1.08883f;
xyz.x = Xn*xyz.y;
xyz.z = Zn*xyz.y;
const math::float3x3 toRGB =
{
3.2406f, -1.5372f, -0.4986f,
-0.9689f, 1.8758f, 0.0415f,
0.0557f, -0.2040f, 1.0570f
};
return toRGB * xyz;
}
__device__
static float hue(const math::float3& val)
{
return atan2(sqrt(3.f) * (val.y - val.z), 2.f * val.x - val.y - val.z);
}
__device__
static bool eq_hue(const math::float3& v1, const math::float3& grey_1, const math::float3& v2, const math::float3& grey_2, const float epsilon)
{
//math::float3 vec1 = normalize(v1 - grey_1);
//math::float3 vec2 = normalize(v2 - grey_2);
//return dot(vec1, vec2) > (1.f - epsilon);
math::float2 vec1 = { sqrtf(3) * (v1.y - v1.z), 2 * v1.x - v1.y - v1.z };
vec1 = normalize(vec1);
math::float2 vec2 = { sqrtf(3) * (v2.y - v2.z), 2 * v2.x - v2.y - v2.z };
vec2 = normalize(vec2);
return dot(vec1, vec2) > (1.f - epsilon);
}
struct BlendBlending
{
__device__
math::float4 operator ()(const math::float4& src, const math::float4& dest) const
{
//classic
//return 0.5f * src + 0.5f * dest;
float alpha = sin(0.2f*uniform[0]);
alpha = alpha*alpha;
math::float3 c1 = alpha * src.xyz();
math::float3 c2 = (1.f - alpha) * dest.xyz();
math::float3 cnew;
math::float3 grey_c1 = greypoint(c1);
math::float3 grey_c2 = greypoint(c2);
float epsilon = 0.01f;
if (eq_hue(c1, grey_c1, c2, grey_c2, epsilon))
{
cnew = c1 + c2;
}
else
{
math::float3 c2_ = opposite_color(c1, grey_c1, c2, grey_c2);
cnew = c1 + c2_;
math::float3 grey_cnew = greypoint(cnew);
if (!eq_hue(c1, grey_c1, cnew, grey_cnew, epsilon))
{
math::float3 c1_ = opposite_color(c2, grey_c2, c1, grey_c1);
cnew = c1_ + c2;
}
}
return math::float4(cnew, 1.f);
}
};
//#define delta (6.f / 29.f)
//#define delta2 delta*delta
//#define delta3 delta2*delta
//
//#define Xn 0.95047f
//#define Zn 1.08883f
//
//static __device__ float f(float t)
//{
// if (t > delta3)
// {
// return powf(t, 1.f / 3.f);
// }
// else
// {
// return (t / (3 * delta2)) + (4.f / 29.f);
// }
//}
//
//static __device__ float f1(float t)
//{
// if (t > delta)
// {
// return t*t*t;
// }
// else
// {
// return (3 * delta2) * (t - (4.f / 29.f));
// }
//}
//
//static __device__ math::float3 to_lab2(const math::float3& val)
//{
// const math::float3x3 toXYZ =
// {
// 0.4124f, 0.3576f, 0.1805f,
// 0.2126f, 0.7152f, 0.0722f,
// 0.0193f, 0.1192f, 0.9505f
// };
//
// math::float3 xyz = (toXYZ * val);
//
// float L = 1.16f * f(xyz.y) - 0.16f;
// float a = 5.f * (f(xyz.x / Xn) - f(xyz.y));
// float b = 2.f * (f(xyz.y) - f(xyz.z / Zn));
//
// return { L, a, b };
//}
//
//static __device__ math::float3 to_rgb2(const math::float3& v)
//{
// float L = v.x;
// float a = v.y;
// float b = v.z;
//
// math::float3 xyz;
//
// xyz.x = Xn * f1(((L + 0.16f) / 1.16f) + (a / 5.f));
// xyz.y = f1((L + 0.16f) / 1.16f);
// xyz.z = Zn * f1(((L + 0.16f) / 1.16f) - (b / 2.f));
//
// const math::float3x3 toRGB =
// {
// 3.2406f, -1.5372f, -0.4986f,
// -0.9689f, 1.8758f, 0.0415f,
// 0.0557f, -0.2040f, 1.0570f
// };
//
// return toRGB * xyz;
//}
//
//static __device__ math::float3 opposite_color2(const math::float3& src, const math::float3& dst)
//{
// math::float3 res;
//
// math::float2 v = { -src.y, -src.z };
// v = normalize(v);
//
// float len = sqrt(dst.y*dst.y + dst.z*dst.z);
//
// res.x = dst.x;
// res.y = len * v.x;
// res.z = len * v.y;
// return res;
//}
//
//static __device__ bool eq_hue2(const math::float3& a, const math::float3& b, float epsilon)
//{
// math::float2 v = normalize(math::float2(a.y, a.z ));
// math::float2 w = normalize(math::float2(b.y, b.z ));
//
// return dot(v, w) > (1.f - epsilon);
//}
//
struct IsoBlendBlending
{
// __device__
// math::float4 operator ()(const math::float4& src, const math::float4& dest)
// {
//
// math::float4 dst = dest;
//
// //return (src.w * src + (1.f - src.w) * dst);
//
// float alpha = src.w;
//
// math::float3 c1_rgb = alpha * src.xyz();
// math::float3 c2_rgb = (1.f - alpha) * dst.xyz();
//
// math::float3 c1_lab = to_lab2(c1_rgb);
// math::float3 c2_lab = to_lab2(c2_rgb);
//
// math::float3 cnew_rgb;
//
// float epsilon = 0.01f;
//
// if (eq_hue2(c1_lab, c2_lab, epsilon))
// {
// cnew_rgb = c1_rgb + c2_rgb;
// }
// else
// {
// math::float3 c2_lab_ = opposite_color2(c1_lab, c2_lab);
// math::float3 cnew_lab = { 0.5f * (c1_lab.x + c2_lab_.x), c1_lab.y + c2_lab_.y, c1_lab.z + c2_lab_.z };
// //cnew_rgb = c1_rgb + to_rgb2(c2_lab_);
// cnew_rgb = to_rgb2(cnew_lab);
//
// if (!eq_hue2(c1_lab, cnew_lab, epsilon))
// {
// math::float3 c1_lab_ = opposite_color2(c2_lab, c1_lab);
// math::float3 cnew_lab = { 0.5f * (c1_lab_.x + c2_lab.x), c1_lab_.y + c2_lab.y, c1_lab_.z + c2_lab.z };
// //cnew_rgb = to_rgb2(c1_lab_) + c2_rgb;
// cnew_rgb = to_rgb2(cnew_lab);
// }
// }
//
// return math::float4(cnew_rgb, 1.f);
// }
__device__
math::float4 operator ()(const math::float4& src, const math::float4& dest) const
{
//math::float4 dst = (dest.x == 1 && dest.y == 1 && dest.z == 1 && dest.w == 1 ? math::float4(0, 0, 0, 1) : dest);
math::float4 dst = dest;
return (src.w * src + (1.f - src.w) * dst);
//float alpha = src.w;
////math::float3 c1 = alpha * src.xyz();
////math::float3 c2 = (1.f - alpha) * dst.xyz();
//math::float3 c1 = alpha * src.xyz();
//math::float3 c2 = (1.f - alpha) * dst.xyz();
//math::float3 cnew;
//math::float3 grey_c1 = greypoint(c1);
//math::float3 grey_c2 = greypoint(c2);
//float epsilon = 0.01f;
//if (eq_hue(c1, grey_c1, c2, grey_c2, epsilon))
//{
// cnew = c1 + c2;
//}
//else
//{
// math::float3 c2_ = opposite_color(c1, grey_c1, c2, grey_c2);
// cnew = c1 + c2_;
// math::float3 grey_cnew = greypoint(cnew);
// if (!eq_hue(c1, grey_c1, cnew, grey_cnew, epsilon))
// {
// math::float3 c1_ = opposite_color(c2, grey_c2, c1, grey_c1);
// cnew = c1_ + c2;
// }
//}
//return math::float4(cnew, 1.f);
}
};
#endif // INCLUDED_CURE_BLEND_SHADERS
DEV static int_t terminal_U_penalty(const fbase_t *s, const int i, const int j, fparam_t p)
{
return s[i] == U || s[j] == U ? p->terminal_AU_penalty : 0;
}
DEV static int_t dangle_3p_energy(const fbase_t *s,
const int i,
const int j,
const int ip1,
fparam_t p)
{
return p->dangle_3p[s[i]][s[j]][s[ip1]] + terminal_U_penalty(s,i,j,p);
}
DEV static int_t dangle_5p_energy(const fbase_t *s,
const int i,
const int j,
const int jm1,
fparam_t p)
{
return p->dangle_5p[s[i]][s[j]][s[jm1]] + terminal_U_penalty(s,i,j,p);
}
DEV static int_t terminal_stack(const fbase_t *s,
const int i,
const int j,
const int ip1,
const int jm1,
fparam_t p)
{
return p->tstack[s[i]][s[j]][s[ip1]][s[jm1]] + terminal_U_penalty(s,i,j,p);
}
DEV static int_t terminal_stack_multibranch(const fbase_t *s,
const int i,
const int j,
const int ip1,
const int jm1,
fparam_t p)
{
return p->tstackm[s[i]][s[j]][s[ip1]][s[jm1]] + terminal_U_penalty(s,i,j,p);
}
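/***
 * Look up the tabulated bonus for special hairpin loops with d unpaired bases
 * (triloops d = 3, tetraloops d = 4, hexaloops d = 6); the d+2 compared bases
 * include the closing pair. Returns 0 if no table entry matches.
 ***/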
DEV static const int_t *lookup_find(const fbase_t *s, const int d, fparam_t p)
{
int i;
switch (d) {
case 3:
for (i = 0; i < p->ntriloop; i++)
if (sequences_match(s, p->triloop[i].seq, d+2))
return &p->triloop[i].val;
break;
case 4:
for (i = 0; i < p->ntloop; i++)
if (sequences_match(s, p->tloop[i].seq, d+2))
return &p->tloop[i].val;
break;
case 6:
for (i = 0; i < p->nhexaloop; i++)
if (sequences_match(s, p->hexaloop[i].seq, d+2))
return &p->hexaloop[i].val;
break;
}
return 0;
}
/***
* Energy of a hairpin loop with d unpaired bases, d = j-i-1
* s[i] is paired with s[j]
* s[i+1] is mismatched with s[j-1]
***/
DEV static int_t hairpin_loop_energy(const fbase_t *s,
const int i,
const int j,
const int d,
fparam_t p)
{
/* Lookup tables for special hairpin loops */
const int_t *val;
if ((val = lookup_find(&s[i],d,p)))
return *val;
/* Hairpin loop initiation penalty */
int_t e;
if (d > LOOP_MAX)
e = (int_t) (p->hairpin_loop_initiation[LOOP_MAX] + p->prelog *
LOG((float) d / LOOP_MAX));
else
e = p->hairpin_loop_initiation[d];
if (d == 3) {
if (contains_only_base(C,d,&s[i+1]))
e += p->c_hairpin_of_3;
e += terminal_U_penalty(s,i,j,p);
} else {
e += p->tstackh[s[i]][s[j]][s[i+1]][s[j-1]];
if (contains_only_base(C,d,&s[i+1]))
e += p->c_hairpin_slope*d + p->c_hairpin_intercept;
}
if (s[i] == G && s[j] == U && i > 1 && s[i-1] == G && s[i-2] == G)
e += p->bonus_for_GGG_hairpin;
return e;
}
DEV static int_t real_min(int_t a, int_t b) { return a < b ? a : b; }
/***
* Energy of an internal/bulge loop with d1, d2 unpaired bases,
* d1 = ip-i-1, d2 = j-jp-1
* s[i] is paired with s[j]
* s[i+1] is mismatched with s[j-1]
* s[ip-1] is mismatched with s[jp+1]
* s[ip] is paired with s[jp]
***/
DEV static int_t alternative_bulge_loop_correction (const int n, const fbase_t *s,
const int i,
const int ip) //i<ip
{
int count = 1;
int k;
//float result;
if (i!=n-1){
k = i;
while (k>=0 && s[k]==s[i+1]) {
count++;
k--;
}
k = ip;
while (k<=n-1 && (s[k]==s[i+1])) {
count++;
k++;
}
}
return (int_t) (-1.0f * RT * conversion_factor * log ((float) count));
}
DEV static int_t internal_loop_energy(const fbase_t *s,
const int n,
const int i,
const int j,
const int ip,
const int jp,
const int d1,
const int d2,
fparam_t p)
{
/* Bulge loops */
if (d1 == 0 || d2 == 0) {
int_t e = p->bulge_loop_initiation[d1+d2];
if (d1 == 1 || d2 == 1) { /* single-nucleotide bulge */
e += p->stack[s[i]][s[j]][s[ip]][s[jp]];
if (d1==0) e += alternative_bulge_loop_correction(n,s,jp,j); //correction for multiple equivalent bulge loops
//else e += alternative_bulge_loop_correction(s,i,jp);
else e += alternative_bulge_loop_correction(n,s,i,ip);
if ((d1 == 1 && s[i+1] == C && (s[i] == C || s[i+2] == C)) ||
(d2 == 1 && s[j-1] == C && (s[j] == C || s[j-2] == C)))
e += p->Bonus_for_Single_C_bulges_adjacent_to_C;
} else {
e += terminal_U_penalty(s,i,j,p);
e += terminal_U_penalty(s,ip,jp,p);
}
return e;
}
/* Small internal loops */
if (d1 == 1 && d2 == 1)
return p->int11[s[i]][s[i+1]][s[i+2]][s[j-2]][s[j-1]][s[j]];
if (d1 == 2 && d2 == 2)
return p->int22[s[i]][s[ip]][s[j]][s[jp]][s[i+1]][s[i+2]][s[j-1]][s[j-2]];
if (d1 == 1 && d2 == 2)
return p->int21[s[i]][s[j]][s[i+1]][s[j-1]][s[jp+1]][s[ip]][s[jp]];
if (d1 == 2 && d2 == 1)
return p->int21[s[jp]][s[ip]][s[jp+1]][s[ip-1]][s[i+1]][s[j]][s[i]];
/* Larger internal loops */
tab4_t *sp;
if (d1 == 1 || d2 == 1)
sp = &p->tstacki1n;
else if ((d1 == 2 && d2 == 3) || (d1 == 3 && d2 == 2))
sp = &p->tstacki23;
else
sp = &p->tstacki;
return p->internal_loop_initiation[d1+d2] +
real_min(p->fm_array_first_element*abs(d1-d2), p->maximum_correction) +
(*sp)[s[i]][s[j]][s[i+1]][s[j-1]] +
(*sp)[s[jp]][s[ip]][s[jp+1]][s[ip-1]];
}
DEV static int_t coaxial_flush(const fbase_t *s,
const int i,
const int j,
const int ip,
const int jp,
fparam_t p)
{
return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) +
p->coaxial[s[i]][s[j]][s[ip]][s[jp]];
}
DEV static int_t coaxial_mismatch1(const fbase_t *s,
const int i,
const int j,
const int ip,
const int jp,
fparam_t p)
{
return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) +
p->tstackcoax[s[j]][s[i]][s[j+1]][s[i-1]] +
p->coaxstack[s[j+1]][s[i-1]][s[ip]][s[jp]];
}
DEV static int_t coaxial_mismatch2(const fbase_t *s,
const int i,
const int j,
const int ip,
const int jp,
fparam_t p)
{
return terminal_U_penalty(s,i,j,p) + terminal_U_penalty(s,ip,jp,p) +
p->tstackcoax[s[jp]][s[ip]][s[jp+1]][s[ip-1]] +
p->coaxstack[s[j]][s[i]][s[j+1]][s[jp+1]];
}
DEV static void free_energy_min(int_t *a, const int_t b)
{
if(*a>b) *a = b;
}
DEV static int int_min(int a, int b) { return a < b ? a : b; }
DEV static int_t int_t_min(int_t a, int_t b) { return a < b ? a : b; }
DEV HOST static int ind(int i, int j, int n)
{
return i*n + j;
}
DEV HOST inline static int cp(int i, int j, const fbase_t *s)
{
return j-i-1 >= LOOP_MIN && is_canonical_pair(s[i],s[j]);
}
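/***
 * A pair i,j is allowed only if it is canonical and far enough apart (cp) and
 * at least one adjacent pair (i-1,j+1 or i+1,j-1) could also form, so isolated
 * (lonely) base pairs are excluded. i and j are swapped if j < i.
 ***/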
DEV HOST inline static int can_pair(int i, int j, int n, const fbase_t *s)
{
if (j < i) {
const int tmp = i;
i = j;
j = tmp;
}
return cp(i,j,s) && ((i > 0 && j < n-1 && cp(i-1,j+1,s)) || cp(i+1,j-1,s));
}
DEV HOST inline static int not_isolated(int i,int j,int n, const fbase_t *s)
{
if (j < i) {
const int tmp = i;
i = j;
j = tmp;
}
return is_canonical_pair(s[i],s[j]) && ((i > 0 && j < n-1 && cp(i-1,j+1,s)) || cp(i+1,j-1,s));
}
DEV static int wrap(int i, int n)
{
return i >= n ? i-n : i;
}
DEV static int is_exterior(int i, int j)
{
return j < i;
}
DEV static int is_interior(int i, int j)
{
return i < j;
}
DEV static int_t *array_val(int_t *a, int i, int j, int n, const fbase_t *s)
{
return can_pair(i,j,n,s) ? &a[ind(i,j,n)] : 0;
}
#ifdef __CUDACC__
#define ISTART blockIdx.x
#define IINC gridDim.x
#else
#define ISTART 0
#define IINC 1
#endif
//MFE recursions begin
//TODO
//figure out source of differences in arrays
//integrate with rnastructure traceback
//when recursions work on the cpu:
//do the same thing with the calculation on the GPU
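//Indexing convention for the kernels below: d indexes the diagonal. For each i,
//jtmp = i+d+1 and j = wrap(jtmp,n), so d = j-i-1 for interior fragments (i < j);
//when jtmp wraps past the end of the sequence, j < i and the fragment is
//treated as exterior (is_exterior).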
GLOBAL static void calc_V_hairpin_and_V_stack
(int d,
int n,
const fbase_t *__restrict s,
int_t *__restrict v,
const fparam_t __restrict p)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if((is_interior(i,j) && !can_pair(i,j,n,s)) || (is_exterior(i,j) && (!is_canonical_pair(s[i],s[j]) ))){
v[ind(i,j,n)] = INF; //this is important
continue;
}
int_t vij = INF; //temp variable to fold free energy sum
if (i != n-1 && j != 0) {
/* hairpin loop */
if (is_interior(i,j))
vij = hairpin_loop_energy(s,i,j,d,p);
/* stack */
if (can_pair(i+1,j-1,n,s) && !((is_interior(i,j)) && (d <= LOOP_MIN-2)))//-2???
free_energy_min(&vij, p->stack[s[i]][s[j]][s[i+1]][s[j-1]] + v[ind(i+1,j-1,n)]);
}
v[ind(i,j,n)] = vij;
}
}
#ifdef __CUDACC__
#define NTHREAD 256
#define SQRT_NTHREAD 16
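/* Block-wide minimization by shared-memory tree reduction: assumes nt is a
   power of two, nt <= NTHREAD, and that all nt threads call this function
   (it uses __syncthreads). Thread 0 receives the minimum in *x. */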
DEV static void free_energy_min_reduce(int_t *x, int tid, int nt)
{
__shared__ int_t buf[NTHREAD];
buf[tid] = *x;
for (nt /= 2, __syncthreads(); nt > 0; nt /= 2, __syncthreads())
if (tid < nt)
free_energy_min(&buf[tid], buf[tid+nt]);
if (tid == 0)
*x = buf[0];
}
#endif /* __CUDACC__ */
GLOBAL static void calc_V_bulge_internal (
int d,
int n,
const fbase_t *__restrict s,
int_t *__restrict v,
const fparam_t __restrict p)
{
// Vbi(i,j) = min[V(k,l)+ Ebulge/int(i,j,k,l)] where i<k<l<j, i!=i+1, and j!=j-1
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if ((is_exterior(i,j) && i-j <= LOOP_MIN) ||
(is_interior(i,j) && d <= LOOP_MIN+2) ||
!can_pair(i,j,n,s))
continue;
int_t vij = INF;
#ifdef __CUDACC__
const int d1start = threadIdx.x;
const int d1inc = blockDim.x;
#else
const int d1start = 0;
const int d1inc = 1;
#endif
const int dmax = int_min(LOOP_MAX, d-2);
const int d1max = int_min(dmax, n-i-2);
int d1;
for (d1 = d1start; d1 <= d1max; d1 += d1inc) { //d1start is threadid, d1max is max loop size
const int ip = i+d1+1; //ip depends on thread's ID in x dimension
const int d2max = int_min(dmax-d1, j-1);
#ifdef __CUDACC__
const int d2start = d1 > 0 ? threadIdx.y : threadIdx.y + 1;
const int d2inc = blockDim.y;
#else
const int d2start = d1 > 0 ? 0 : 1;
const int d2inc = 1;
#endif
int d2;
for (d2 = d2start; d2 <= d2max; d2 += d2inc) {
const int jp = j-d2-1;//jp depends on thread's ID in the y dimension
if (can_pair(ip,jp,n,s))
free_energy_min(&vij, internal_loop_energy(s,n,i,j,ip,jp,d1,d2,p) + v[ind(ip,jp,n)]);
}
}
#ifdef __CUDACC__
const int tid = threadIdx.x * blockDim.y + threadIdx.y;
free_energy_min_reduce(&vij, tid, blockDim.x*blockDim.y); //after we have 1 value per thread, do parallel reduction
if (tid != 0)
continue;
#endif
free_energy_min(&v[ind(i,j,n)], vij); //write vij to V
}
}
GLOBAL static void calc_V_multibranch (
int d,
int n,
const fbase_t *__restrict s,
int_t *__restrict v,
const int_t *__restrict wm,
const fparam_t __restrict p)
{
// Vmb(i,j) = min[WM(i+1,j-1)+c+a, WM(i+2,j-1)+Edangle5'+a+b+c, WM(i+1,j-2)+Edangle3'+a+b+c, WM(i+2,j-2)+Edangleboth+a+2b+c,
// min_over_k[ V(i+1,k) + min[W(k+1,j-1), WM(k+1,j-1)]] + a+2c+Eflushcoax(i to j, i+1 to k) , //various coaxial stacking possibilities
// min_over_k[ V(k,j-1) + min[W(i+1,k-1), WM(i+1,k-1)]] + a+2c+Eflushcoax(i to j, k to j-1) ,
// min_over_k[ V(i+2,k) + min[W(k+2,j-1), WM(k+2,j-1)]] + a+2c+2b+Emismatch3'coax(i to j, i+2 to k) ,
// min_over_k[ V(i+2,k) + min[W(k+1,j-2), WM(k+1,j-2)]] + a+2c+2b+Emismatch5'coax(i to j, i+2 to k) ,
// min_over_k[ V(k,j-2) + min[W(i+2,k-1), WM(i+2,k-1)]] + a+2c+2b+Emismatch3'coax(i to j, k to j-2) ,
// min_over_k[ V(k,j-2) + min[W(i+1,k-2), WM(i+1,k-2)]] + a+2c+2b+Emismatch5'coax(i to j, k to j-2) ]
// where i < k < j
//V(i,j) = min(V(i,j), Vmb(i,j))
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if ((is_exterior(i,j) && i-j <= LOOP_MIN) || !can_pair(i,j,n,s))
continue;
int_t vij=INF;
if (d > 2*LOOP_MIN + 3 && i != n-1 && j != 0) { //if i and j are far enough apart to close a MBL..
free_energy_min(&vij, wm[ind(i+1,j-1,n)] + terminal_U_penalty(s,i,j,p) + p->a + p->c);
if (i != n-2)
free_energy_min(&vij, wm[ind(i+2,j-1,n)] + dangle_3p_energy(s,i,j,i+1,p) + p->a + p->b + p->c);
if (j != 1)
free_energy_min(&vij, wm[ind(i+1,j-2,n)] + dangle_5p_energy(s,i,j,j-1,p) + p->a + p->b + p->c);
if (i != n-2 && j != 1)
free_energy_min(&vij, wm[ind(i+2,j-2,n)] + terminal_stack_multibranch(s,i,j,i+1,j-1,p) + p->a + 2*p->b + p->c);
}
free_energy_min(&v[ind(i,j,n)], vij);
}
}
GLOBAL static void calc_V_exterior (
int d,
int n,
const fbase_t *__restrict s,
int_t *__restrict v,
const int_t *__restrict w5,
const int_t *__restrict w3,
const fparam_t __restrict p)
{
// Vexterior(i,j) = min[ W3(i+1)+W3(j-1-N), W3(i+2)+W5(j-1-N)+E5'dangle, W3(i+1)+W5(j-2-N)+E3'dangle, W3(i+2)+W5(j-2-N)+Emismatch,
// min_over_k[ V(i+1,k) + W3(k+1) + W5(j-1-N) + Eflushcoax ],
// min_over_k[ V(k,j-1-N) + W3(i+1) + W5(k-1) + E ],
// min_over_k[ V(i+2,k-2) + W3(k+1) + W5(j-1-N) + E ],
// min_over_k[ V(i+2,k-1) + W3(k+1) + W5(j-2-N) + E ],
// min_over_k[ V(k+1,j-2-N) + W3(i+1) + W5(k-1) + E ],
// min_over_k[ V(k,j-2-N) + W3(i+2) + W5(k-1) + E ] ]
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if ( is_interior(i,j))
continue;
int_t vij = INF; //temp variable to fold free energy sum
if(is_canonical_pair(s[i],s[j])&¬_isolated(i,j,n,s)){
free_energy_min(&vij, w3[i+1] + w5[j-1] + terminal_U_penalty(s,i,j,p));
if (i != n-1)
free_energy_min(&vij, w3[i+2] + w5[j-1] + dangle_3p_energy(s,i,j,i+1,p));
if (j != 0)
free_energy_min(&vij, w3[i+1] + w5[j-2] + dangle_5p_energy(s,i,j,j-1,p));
if (i != n-1 && j != 0)
free_energy_min(&vij, w3[i+2] + w5[j-2] + terminal_stack(s,i,j,i+1,j-1,p));
}
free_energy_min(&v[ind(i,j,n)], vij);
}
}
GLOBAL static void calc_W (
int d,
int n,
const fbase_t *__restrict s,
int_t *__restrict v,
int_t *__restrict w,
const fparam_t __restrict p)
{
//W(i,j) = min[V(i,j)+c,V(i+1,j)+Edangle5',
// V(i,j+1)+Edangle3',
// V(i+1,j+1)+Edangleboth]
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1; // max: n-1+n-2+1
const int j = wrap(jtmp,n); // n-2
int_t wij = INF; //temp variable to fold free energy sum
int_t* v_temp;
//consider adding nucleotide to existing loop
if(d>0){
if (i!=n-1)
free_energy_min(&wij, w[ind(i+1,j,n)] + p->b);
if(j!=0)
free_energy_min(&wij, w[ind(i,j-1,n)] + p->b);
}
if((is_interior(i,j) && (d>LOOP_MIN-1))){
v_temp = array_val(v,i,j,n,s);
free_energy_min(&wij, (v_temp? *v_temp:INF) + terminal_U_penalty(s,i,j,p) + p->c);
if(j!=0){
v_temp = array_val(v,i,j-1,n,s);
free_energy_min(&wij, (v_temp? *v_temp:INF) + dangle_3p_energy(s,j-1,i,j,p) + p->b + p->c);
}
if(i!=n-1) {
v_temp = array_val(v,i+1,j,n,s);
free_energy_min(&wij, (v_temp? *v_temp:INF) + dangle_5p_energy(s,j,i+1,i,p) + p->b + p->c);
}
if((i!=n-1) && (j!=0)){
v_temp = array_val(v,i+1,j-1,n,s);
free_energy_min(&wij, (v_temp? *v_temp:INF) + terminal_stack_multibranch(s,j-1,i+1,j,i,p) + 2*p->b + p->c);
}
}
if(is_exterior(i,j)){
free_energy_min(&wij, v[ind(i,j,n)] + terminal_U_penalty(s,i,j,p) + p->c);
if(j!=0){
free_energy_min(&wij, v[ind(i,j-1,n)] + dangle_3p_energy(s,j-1,i,j,p) + p->b + p->c);
}
if(i!=n-1) {
free_energy_min(&wij, v[ind(i+1,j,n)] + dangle_5p_energy(s,j,i+1,i,p) + p->b + p->c);
}
if((i!=n-1) && (j!=0)){
free_energy_min(&wij, v[ind(i+1,j-1,n)] + terminal_stack_multibranch(s,j-1,i+1,j,i,p) + 2*p->b + p->c);
}
}
w[ind(i,j,n)] = wij;
}
}
GLOBAL static void calc_WM (
int d,
int n,
const fbase_t *__restrict s,
int_t *__restrict w,
int_t *__restrict wm,
const fparam_t __restrict p)
{
//WM(i,j) = min[W(i,k)+W(k+1,j),
// V(i,k)+V(k+1,j)+2c+Eflushcoax,
// V(i,k)+V(k+2,j-1)+2c+Ecoax5'mismatch,
// V(i+1,k)+V(k+2,j)+2c+Ecoax3'mismatch]
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
int_t tmp = INF;
//don't need to calculate every WM
if((is_interior(i,j) && (j-i-1 <= 2*LOOP_MIN+2))){//condition copied verbatim from algorithm.cpp
wm[ind(i,j,n)]=INF;
continue;
}
#ifdef __CUDACC__
const int kstart = i + threadIdx.x;
const int kinc = blockDim.x;
#else
const int kstart = i;
const int kinc = 1;
#endif
int ktmp;
for (ktmp = kstart; ktmp < jtmp; ktmp += kinc) {
if (ktmp != n-1) {
const int k = wrap(ktmp,n);
free_energy_min(&tmp, w[ind(i,k,n)] + w[ind(k+1,j,n)]);
}
}
if(d>0){
if (i!=n-1)
free_energy_min(&tmp, wm[ind(i+1,j,n)] + p->b);
if(j!=0)
free_energy_min(&tmp, wm[ind(i,j-1,n)] + p->b);
}
#ifdef __CUDACC__
free_energy_min_reduce(&tmp, threadIdx.x, blockDim.x);
if (threadIdx.x != 0)
continue;
#endif
wm[ind(i,j,n)] = tmp;
free_energy_min(&w[ind(i,j,n)],tmp);
}
}
GLOBAL static void calc_coaxial (
int d,
int n,
const fbase_t *__restrict s,
int_t *__restrict v,
const int_t *__restrict w,
const int_t *__restrict w5,
const int_t *__restrict w3,
const fparam_t __restrict p)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if ((is_exterior(i,j) && i-j <= LOOP_MIN) || !can_pair(i,j,n,s))
continue;
const int_t *v1;
int_t vij = INF;
/* exterior */
if (is_exterior(i,j)) {
int k, kstart;
#ifdef __CUDACC__
kstart = threadIdx.x;
const int kinc = blockDim.x;
#else
kstart = 0;
const int kinc = 1;
#endif
for (k = kstart; k < j - LOOP_MIN; k += kinc) {
if ((v1 = array_val(v,k,j-1,n,s)))
free_energy_min(&vij, w3[i+1] + w5[k-1] + coaxial_flush(s,k,j-1,j,i,p) + (*v1));
if (j-2 >= 0) {
if (i < n-1 && (v1 = array_val(v,k,j-2,n,s)))
free_energy_min(&vij, w3[i+2] + w5[k-1] + coaxial_mismatch2(s,k,j-2,j,i,p) + (*v1));
if ((v1 = array_val(v,k+1,j-2,n,s)))
free_energy_min(&vij, w3[i+1] + w5[k-1] + coaxial_mismatch1(s,k+1,j-2,j,i,p) + (*v1));
}
}
#ifdef __CUDACC__
kstart = i+LOOP_MIN+1 + threadIdx.x;
#else
kstart = i+LOOP_MIN+1;
#endif
for (k = kstart; k < n; k += kinc) {
if ((v1 = array_val(v,i+1,k,n,s)))
free_energy_min(&vij, w3[k+1] + w5[j-1] + coaxial_flush(s,j,i,i+1,k,p) + (*v1));
if (j > 0 && (v1 = array_val(v,i+2,k,n,s)))
free_energy_min(&vij, w3[k+1] + w5[j-2] + coaxial_mismatch1(s,j,i,i+2,k,p) + (*v1));
if ((v1 = array_val(v,i+2,k-1,n,s)))
free_energy_min(&vij, w3[k+1] + w5[j-1] + coaxial_mismatch2(s,j,i,i+2,k-1,p) + (*v1));
}
} /* end exterior */
/* multibranch */
if (d > 2*LOOP_MIN + 3 && i != n-1 && j != 0) {
int ktmp;
#ifdef __CUDACC__
int ktmpstart = i+2 + threadIdx.x;
const int ktmpinc = blockDim.x;
#else
int ktmpstart = i+2;
const int ktmpinc = 1;
#endif
for (ktmp = ktmpstart; ktmp < jtmp-2; ktmp += ktmpinc) {
const int k = wrap(ktmp,n);
if (k != n-1) {
if ((v1 = array_val(v,i+1,k,n,s)))
free_energy_min(&vij, coaxial_flush(s,j,i,i+1,k,p) + (*v1) + p->a_2c +
w[ind(k+1,j-1,n)]);
if (ktmp+2 < jtmp-1 && i+1 != n-1 && k+1 != n-1 && (v1 = array_val(v,i+2,k,n,s))) {
const int_t tmp = (*v1) + p->a_2b_2c;
free_energy_min(&vij, coaxial_mismatch2(s,j,i,i+2,k,p) + tmp + w[ind(k+2,j-1,n)]);
if (j != 1) {
free_energy_min(&vij, coaxial_mismatch1(s,j,i,i+2,k,p) + tmp + w[ind(k+1,j-2,n)]);
}
}
}
}
#ifdef __CUDACC__
ktmpstart = i+3 + threadIdx.x;
#else
ktmpstart = i+3;
#endif
for (ktmp = ktmpstart; ktmp < jtmp-1; ktmp += ktmpinc) {
const int k = wrap(ktmp,n);
if (k != 0) {
if ((v1 = array_val(v,k,j-1,n,s)))
free_energy_min(&vij, coaxial_flush(s,k,j-1,j,i,p) + (*v1) + p->a_2c +
w[ind(i+1,k-1,n)]);
if (j != 1 && ktmp > i+3 && (v1 = array_val(v,k,j-2,n,s))) {
const int_t tmp = (*v1) + p->a_2b_2c;
if (k != 1)
free_energy_min(&vij, coaxial_mismatch1(s,k,j-2,j,i,p) + tmp + w[ind(i+1,k-2,n)]);
if (i != n-2)
free_energy_min(&vij, coaxial_mismatch2(s,k,j-2,j,i,p) + tmp + w[ind(i+2,k-1,n)]);
}
}
}
} /* end multibranch */
#ifdef __CUDACC__
free_energy_min_reduce(&vij, threadIdx.x, blockDim.x);
if (threadIdx.x != 0)
continue;
#endif
free_energy_min(&v[ind(i,j,n)], vij);
} /* end loop over i */
} /* end calc_coaxial */
GLOBAL static void calc_wl_coax(
int d,
int n,
const fbase_t *__restrict s,
int_t *__restrict v,
int_t *__restrict w,
int_t *__restrict wm,
const fparam_t __restrict p,
int_t *__restrict wca)
{
int i;
for (i = ISTART; i < n; i += IINC) {
const int jtmp = i+d+1;
const int j = wrap(jtmp,n);
if ((is_exterior(i,j) && i-j <= LOOP_MIN) ||
(is_interior(i,j) && d <= 2*LOOP_MIN+1))
continue;
#ifdef __CUDACC__
const int kstart = i+LOOP_MIN+1 + threadIdx.x;
const int kinc = blockDim.x;
#else
const int kstart = i+LOOP_MIN+1;
const int kinc = 1;
#endif
int ktmp;
int_t tmp1 = INF, tmp2 = INF;
for (ktmp = kstart; ktmp < jtmp-LOOP_MIN-1; ktmp += kinc) {
const int k = wrap(ktmp,n);
if (k == n-1) continue;
int_t *v1, *v2;
if ((v1 = array_val(v,i,k,n,s)) && (v2 = array_val(v,k+1,j,n,s))){
free_energy_min(&tmp1, (*v1) + (*v2) + coaxial_flush(s,i,k,k+1,j,p));
}
if (j == 0 || k+1 == n-1) continue;
if (i != n-1 && (v1 = array_val(v,i+1,k,n,s)) && (v2 = array_val(v,k+2,j,n,s))){
free_energy_min(&tmp2, (*v1) + (*v2) + coaxial_mismatch1(s,i+1,k,k+2,j,p));
}
if ((v1 = array_val(v,i,k,n,s)) && (v2 = array_val(v,k+2,j-1,n,s))){
free_energy_min(&tmp2, (*v1) + (*v2) + coaxial_mismatch2(s,i,k,k+2,j-1,p));
}
}
#ifdef __CUDACC__
free_energy_min_reduce(&tmp1, threadIdx.x, blockDim.x);
free_energy_min_reduce(&tmp2, threadIdx.x, blockDim.x);
if (threadIdx.x != 0) continue;
#endif
wca[ind(i,j,n)] = int_t_min(tmp1,tmp2);
free_energy_min(&wm[ind(i,j,n)], tmp1+2*p->c);
free_energy_min(&wm[ind(i,j,n)], tmp2+2*p->b+2*p->c);
free_energy_min(&w[ind(i,j,n)], wm[ind(i,j,n)]);
} /* end loop over i */
} /* end calc_wl_coax */
GLOBAL static void calc_w5_and_w3 (
int d,
int n,
const fbase_t *__restrict s,
int_t *__restrict v,
int_t *__restrict w5,
int_t *__restrict w3,
const fparam_t __restrict p,
const int_t *__restrict wca)
{
#ifdef __CUDACC__
const int istart = threadIdx.x;
const int iinc = blockDim.x;
#else
const int istart = 0;
const int iinc = 1;
#endif
int_t w5tmp=0,w3tmp = 0;
int i;
int_t* v_temp;
for (i = istart; i + LOOP_MIN <= d; i += iinc) {
if((v_temp = array_val(v,i,d+1,n,s)))
free_energy_min(&w5tmp, w5[i-1] + *v_temp + terminal_U_penalty(s,d+1,i,p)); //the nucleotide that's more 3' has to go first in the terminal_U_penalty call
if(d-i>LOOP_MIN){//necessary, or we seg fault because we try to have a pair in a 4mer
if((v_temp = array_val(v,i,d,n,s)))
free_energy_min(&w5tmp, w5[i-1] + *v_temp + dangle_3p_energy(s,d,i,d+1,p));
if((v_temp = array_val(v,i+1,d+1,n,s)))
free_energy_min(&w5tmp, w5[i-1] + *v_temp + dangle_5p_energy(s,d+1,i+1,i,p));
free_energy_min(&w5tmp,w5[i-1] + wca[ind(i,d+1,n)]);
}
if ((d-i>LOOP_MIN+1) && ((v_temp = array_val(v,i+1,d,n,s))))
free_energy_min(&w5tmp, w5[i-1] + *v_temp + terminal_stack(s,d,i+1,d+1,i,p));
if((v_temp = array_val(v,n-d-2,n-i-1,n,s)))
free_energy_min(&w3tmp, w3[n-i] + *v_temp + terminal_U_penalty(s,n-i-1,n-d-2,p));
if((v_temp = array_val(v,n-d-2,n-i-2,n,s)))
free_energy_min(&w3tmp, w3[n-i] + *v_temp + dangle_3p_energy(s,n-i-2,n-d-2,n-i-1,p));
if((n-d-1 != 0) && ((v_temp = array_val(v,n-d-1,n-i-1,n,s))))
free_energy_min(&w3tmp, w3[n-i] + *v_temp + dangle_5p_energy(s,n-i-1,n-d-1,n-d-2,p));
if((n-i-2 != n-1) && (n-d-1 != 0) && ((v_temp = array_val(v,n-d-1,n-i-2,n,s))))
free_energy_min(&w3tmp, w3[n-i] + *v_temp + terminal_stack(s,n-i-2,n-d-1,n-i-1,n-d-2,p));
free_energy_min(&w3tmp,w3[n-i] + wca[ind(n-d-2,n-i-1,n)]);
}
#ifdef __CUDACC__
free_energy_min_reduce(&w5tmp, threadIdx.x, blockDim.x);
free_energy_min_reduce(&w3tmp, threadIdx.x, blockDim.x);
if (threadIdx.x != 0)
return;
#endif
w5[d+1] = w5[d];
w3[n-d-2] = w3[n-d-1];
free_energy_min(&w5[d+1], w5tmp);
free_energy_min(&w3[n-d-2], w3tmp);
} /* end calc_w5_and_w3 */
GLOBAL static void init_w5_and_w3 (int n, int_t *__restrict w5, int_t *__restrict w3)
{
#ifdef __CUDACC__
w5[blockIdx.x] = 0;
w3[blockIdx.x] = 0;
#else
int i;
for(i=0;i<n+1;i++){
w5[i] = 0;
w3[i] = 0;
}
#endif
}
//MFE recursions end
void initialize(int_t* arr,size_t size){
size_t i;
for(i=0;i<size;i++){
arr[i] = INF;
}
}
frna_t frna_new(const char *str, fparam_t par)
{
frna_t p = (frna_t) safe_malloc(sizeof(struct frna));
memset(p, 0, sizeof(struct frna));
const int n = p->n = strlen(str);
p->seq = fsequence_from_string(str);
p->v = (int_t *) safe_malloc(n*n*sizeof(int_t));
p->w = (int_t *) safe_malloc(n*n*sizeof(int_t));
p->wm = (int_t *) safe_malloc(n*n*sizeof(int_t));
p->wca = (int_t *) safe_malloc(n*n*sizeof(int_t));
p->w5 = (int_t *) safe_malloc((n+1)*sizeof(int_t)) + 1;
p->w3 = (int_t *) safe_malloc((n+1)*sizeof(int_t));
initialize(p->v,n*n);
initialize(p->w,n*n);
initialize(p->wm,n*n);
initialize(p->wca,n*n);
#ifdef __CUDACC__ /* do multithreaded fill on GPU */
#define ALLOC(a,sz) CU(cudaMalloc(&a,(sz)*sizeof(int_t)))
int_t *v,*w,*wm,*w5,*w3,*wca;
ALLOC(v,n*n); //best energy of structure closed by pair i,j. j>i: exterior fragment
ALLOC(w,n*n); //best energy of structure from i to j
ALLOC(wm,n*n); //best energy of structure i to j containing 2 or more branches
ALLOC(w5,n+1); //best energy of structure from 1 to i
w5++;//w5 is indexed from 1 -- is this a good idea?
ALLOC(w3,n+1); //best energy of structure from i to numberofbases
ALLOC(wca,n*n);
fparam_t pm;
CU(cudaMalloc(&pm, sizeof(struct fparam)));
CU(cudaMemcpy(pm, par, sizeof(struct fparam), cudaMemcpyHostToDevice));
fbase_t *s;
CU(cudaMalloc(&s,n*sizeof(fbase_t)));
CU(cudaMemcpy(s, p->seq, n*sizeof(fbase_t), cudaMemcpyHostToDevice));
CU(cudaMemcpy(v, p->v, n*n*sizeof(int_t), cudaMemcpyHostToDevice));
CU(cudaMemcpy(w, p->w, n*n*sizeof(int_t), cudaMemcpyHostToDevice));
CU(cudaMemcpy(wm, p->wm, n*n*sizeof(int_t), cudaMemcpyHostToDevice));
 init_w5_and_w3<<<n+1,1>>>(n+1,w5-1,w3); /* n+1 entries: indices 0..n of the base arrays */
for (int d = 0; d < n-1; d++) { //for fragment lengths (1 : n)
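 /* one diagonal per iteration: each kernel below handles every fragment
 whose ends are d+1 apart; V is filled first, then W, WM and the coaxial
 terms, and finally the exterior arrays w5 and w3 */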
calc_V_hairpin_and_V_stack<<<n,1>>>(d, n, s, v, pm);
calc_V_bulge_internal<<<n,dim3(SQRT_NTHREAD,SQRT_NTHREAD,1)>>>(d, n, s, v, pm);
calc_V_exterior<<<n,1>>>(d, n, s, v, w5, w3, pm);
calc_V_multibranch<<<n,1>>>(d, n, s, v, wm, pm);
calc_coaxial<<<n,NTHREAD>>>(d, n, s, v, w, w5, w3, pm);
calc_W<<<n,1>>>(d, n, s, v, w, pm);
calc_WM<<<n,NTHREAD>>>(d, n, s, w, wm, pm);
calc_wl_coax<<<n,NTHREAD>>>(d, n, s, v, w, wm, pm, wca);
calc_w5_and_w3<<<1,NTHREAD>>>(d, n, s, v, w5, w3, pm, wca);
}
CU(cudaMemcpy(p->v, v, n*n*sizeof(int_t), cudaMemcpyDeviceToHost));
CU(cudaMemcpy(p->w, w, n*n*sizeof(int_t), cudaMemcpyDeviceToHost));
CU(cudaMemcpy(p->wm, wm, n*n*sizeof(int_t), cudaMemcpyDeviceToHost));
CU(cudaMemcpy(p->w5 - 1, w5 - 1, (n+1)*sizeof(int_t), cudaMemcpyDeviceToHost));
CU(cudaMemcpy(p->w3, w3, (n+1)*sizeof(int_t), cudaMemcpyDeviceToHost));
CU(cudaFree(v));
CU(cudaFree(w5 - 1));
CU(cudaFree(w3));
CU(cudaFree(w));
 CU(cudaFree(wm));
 CU(cudaFree(wca));
CU(cudaFree(pm));
CU(cudaFree(s));
#else /* do serial fill on CPU */
#define ALLOC(a,sz) a = (int_t *) safe_malloc((sz)*sizeof(int_t))
/* ALLOC(v,n*n); //best energy of structure closed by pair i,j. j>i: exterior fragment
ALLOC(w,n*n); //best energy of structure from i to j
ALLOC(wm,n*n); //best energy of structure i to j containing 2 or more branches
ALLOC(w5,n+1); //best energy of structure from 1 to i
w5++;//w5 is indexed from 1 -- is this a good idea?
ALLOC(w3,n+1); //best energy of structure from i to numberofbases
*/
 init_w5_and_w3(n, p->w5 - 1, p->w3); /* w5 is stored with an offset of 1, so pass the raw base pointer */
int d;
for (d = 0; d < n-1; d++) {
calc_V_hairpin_and_V_stack(d, p->n, p->seq, p->v, par);
calc_V_bulge_internal(d, p->n, p->seq, p->v, par);
calc_V_exterior(d, p->n, p->seq, p->v, p->w5, p->w3, par);
calc_V_multibranch(d, p->n, p->seq, p->v, p->wm, par);
calc_coaxial(d, p->n, p->seq, p->v, p->w, p->w5, p->w3, par);
calc_W(d, p->n, p->seq, p->v, p->w, par);
calc_WM(d, p->n, p->seq, p->v, p->wm, par);
calc_wl_coax(d, p->n, p->seq, p->v, p->w, p->wm, par, p->wca);
calc_w5_and_w3(d, p->n, p->seq, p->v, p->w5, p->w3, par, p->wca);
}
#endif /* __CUDACC__ */
return p;
} /* end frna_new */
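/* A minimal usage sketch (assuming an fparam_t `par` obtained from the
 accompanying parameter module; its constructor is not shown here):

 frna_t f = frna_new("GGGGAAACCCC", par); // fill V/W/WM/w5/w3
 frna_write(f, "fold.out"); // dump the arrays
 frna_delete(f); // release host memory
*/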
void frna_delete(frna_t p)
{
if (p) {
if (p->seq)
free(p->seq);
if (p->v)
free(p->v);
if (p->w)
free(p->w);
 if (p->wm)
 free(p->wm);
 if (p->wca)
 free(p->wca);
 if (p->w5)
 free(p->w5 - 1);
 if (p->w3)
 free(p->w3);
free(p);
}
}
void frna_write(const frna_t p, const char* outfile )
{
 FILE *f = fopen(outfile,"w");
 if (!f) {
 printf("failed to open output file %s\n", outfile);
 return;
 }
int i,j, n = p->n;
const fbase_t *s = p->seq;
fprintf(f, "n: %d\n", n);
fprintf(f, "seq: ");
for (i = 0; i < n; i++)
fprintf(f, "%c", fbase_as_char(s[i]));
fprintf(f, "\n");
fprintf(f, "i\tj\tV:\tW:\tWM:\tV':\tW':\tWM':\n");
for (j = 0; j < n; j++)
for(i = 0; i < j; i++)
fprintf(f, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
i+1,j+1,p->v[ind(i,j,n)],p->w[ind(i,j,n)],
p->wm[ind(i,j,n)],p->v[ind(j,i,n)],p->w[ind(j,i,n)],p->wm[ind(j,i,n)] );
fprintf(f, "\n\n\ni\tw5[i]\tw3[i]\n");
fprintf(f, "0\t0\t0\n");
for (i = 0; i < n; i++) {
fprintf(f, "%d\t",i+1);
fprintf(f, "%d\t",p->w5[i]);
fprintf(f, "%d\n",p->w3[i]);
}
 fclose(f);
}
short base_as_num(fbase_t b)
{
switch (b) {
case A:
return 1;
case C:
return 2;
case G:
return 3;
case U:
return 4;
default:
printf("unknown base %d\n",b);
die("base_as_num: unknown base");
return 0;
}
}
fbase_t num_as_base(short x)
{
switch (x) {
case 1:
return A;
case 2:
return C;
case 3:
return G;
case 4:
return U;
default:
return A;
}
}
#include "k2/csrc/device_guard.h"
#include "k2/csrc/fsa.h"
#include "k2/csrc/fsa_algo.h"
#include "k2/csrc/fsa_utils.h"
#include "k2/csrc/host_shim.h"
#include "k2/csrc/rm_epsilon.h"
#include "k2/python/csrc/torch/fsa_algo.h"
#include "k2/python/csrc/torch/torch_util.h"
#include "k2/python/csrc/torch/v2/ragged_any.h"
namespace k2 {
static void PybindTopSort(py::module &m) {
// TODO(fangjun): add docstring for this function
//
// if need_arc_map is true, it returns (sorted_fsa_vec, arc_map);
// otherwise, it returns (sorted_fsa_vec, None)
m.def(
"top_sort",
[](FsaVec &src, bool need_arc_map = true)
-> std::pair<FsaVec, torch::optional<torch::Tensor>> {
DeviceGuard guard(src.Context());
Array1<int32_t> arc_map;
FsaVec sorted;
TopSort(src, &sorted, need_arc_map ? &arc_map : nullptr);
torch::optional<torch::Tensor> tensor;
if (need_arc_map) tensor = ToTorch(arc_map);
return std::make_pair(sorted, tensor);
},
py::arg("src"), py::arg("need_arc_map") = true);
}
static void PybindLinearFsa(py::module &m) {
m.def(
"linear_fsa",
[](RaggedAny &labels, torch::optional<torch::Device> = {}) -> FsaVec {
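 // the device argument is unnamed and therefore unused here; the
 // context (and hence the device) is taken from `labels` itself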
DeviceGuard guard(labels.any.Context());
return LinearFsas(labels.any.Specialize<int32_t>());
},
py::arg("labels"), py::arg("device") = py::none());
m.def(
"linear_fsa",
[](const std::vector<int32_t> &labels,
torch::optional<torch::Device> device = {}) -> Fsa {
ContextPtr context =
GetContext(device.value_or(torch::Device(torch::kCPU)));
DeviceGuard guard(context);
Array1<int32_t> array(context, labels);
 return LinearFsa(array);
},
py::arg("labels"), py::arg("device") = py::none());
m.def(
"linear_fsa",
[](const std::vector<int32_t> &labels,
torch::optional<std::string> device = {}) -> Fsa {
ContextPtr context = GetContext(torch::Device(device.value_or("cpu")));
DeviceGuard guard(context);
Array1<int32_t> array(context, labels);
 return LinearFsa(array);
},
py::arg("labels"), py::arg("device") = py::none());
m.def(
"linear_fsa",
[](const std::vector<std::vector<int32_t>> &labels,
torch::optional<torch::Device> device = {}) -> FsaVec {
ContextPtr context =
GetContext(device.value_or(torch::Device(torch::kCPU)));
DeviceGuard guard(context);
Ragged<int32_t> ragged = CreateRagged2<int32_t>(labels).To(context);
return LinearFsas(ragged);
},
py::arg("labels"), py::arg("device") = py::none());
m.def(
"linear_fsa",
[](const std::vector<std::vector<int32_t>> &labels,
torch::optional<std::string> device = {}) -> FsaVec {
ContextPtr context = GetContext(torch::Device(device.value_or("cpu")));
DeviceGuard guard(context);
Ragged<int32_t> ragged = CreateRagged2<int32_t>(labels).To(context);
return LinearFsas(ragged);
},
py::arg("labels"), py::arg("device") = py::none());
}
static void PybindIntersect(py::module &m) {
// It runs on CUDA if and only if
// - a_fsas is on GPU
// - b_fsas is on GPU
// - treat_epsilons_specially is False
//
// Otherwise, it is run on CPU.
m.def(
"intersect",
[](FsaOrVec &a_fsas, int32_t properties_a, FsaOrVec &b_fsas,
int32_t properties_b, bool treat_epsilons_specially = true,
bool need_arc_map =
true) -> std::tuple<FsaOrVec, torch::optional<torch::Tensor>,
torch::optional<torch::Tensor>> {
DeviceGuard guard(a_fsas.Context());
Array1<int32_t> a_arc_map;
Array1<int32_t> b_arc_map;
FsaVec out;
if (!treat_epsilons_specially &&
a_fsas.Context()->GetDeviceType() == kCuda) {
FsaVec a_fsa_vec = FsaToFsaVec(a_fsas);
FsaVec b_fsa_vec = FsaToFsaVec(b_fsas);
std::vector<int32_t> tmp_b_to_a_map(b_fsa_vec.Dim0());
if (a_fsa_vec.Dim0() == 1) {
std::fill(tmp_b_to_a_map.begin(), tmp_b_to_a_map.end(), 0);
} else {
std::iota(tmp_b_to_a_map.begin(), tmp_b_to_a_map.end(), 0);
}
Array1<int32_t> b_to_a_map(a_fsa_vec.Context(), tmp_b_to_a_map);
// TODO: should perhaps just always make this false, for
// predictability, and let the user call intersect_device
// if they want to use sorted matching?
bool sorted_match_a = ((properties_a & kFsaPropertiesArcSorted) != 0);
out = IntersectDevice(
a_fsa_vec, properties_a, b_fsa_vec, properties_b, b_to_a_map,
need_arc_map ? &a_arc_map : nullptr,
need_arc_map ? &b_arc_map : nullptr, sorted_match_a);
} else {
Intersect(a_fsas, properties_a, b_fsas, properties_b,
treat_epsilons_specially, &out,
need_arc_map ? &a_arc_map : nullptr,
need_arc_map ? &b_arc_map : nullptr);
}
FsaOrVec ans;
if (a_fsas.NumAxes() == 2 && b_fsas.NumAxes() == 2)
ans = GetFsaVecElement(out, 0);
else
ans = out;
torch::optional<torch::Tensor> a_tensor;
torch::optional<torch::Tensor> b_tensor;
if (need_arc_map) {
a_tensor = ToTorch(a_arc_map);
b_tensor = ToTorch(b_arc_map);
}
return std::make_tuple(ans, a_tensor, b_tensor);
},
py::arg("a_fsas"), py::arg("properties_a"), py::arg("b_fsas"),
py::arg("properties_b"), py::arg("treat_epsilons_specially") = true,
py::arg("need_arc_map") = true,
R"(
 If treat_epsilons_specially is true, epsilons are treated as epsilons;
 otherwise they are treated as real symbols.
 If need_arc_map is true, it returns a tuple (fsa_vec, a_arc_map, b_arc_map);
 If need_arc_map is false, it returns a tuple (fsa_vec, None, None).
 a_arc_map maps arc indexes of the returned fsa to arcs in the input a_fsas;
 b_arc_map does the same for b_fsas.
)");
}
static void PybindIntersectDevice(py::module &m) {
// It works on both GPU and CPU.
// But it is super slow on CPU.
// Do not use this one for CPU; use `Intersect` for CPU.
m.def(
"intersect_device",
[](FsaVec &a_fsas, int32_t properties_a, FsaVec &b_fsas,
int32_t properties_b, torch::Tensor b_to_a_map,
bool need_arc_map = true,
bool sorted_match_a =
false) -> std::tuple<FsaVec, torch::optional<torch::Tensor>,
torch::optional<torch::Tensor>> {
DeviceGuard guard(a_fsas.Context());
Array1<int32_t> a_arc_map;
Array1<int32_t> b_arc_map;
Array1<int32_t> b_to_a_map_array = FromTorch<int32_t>(b_to_a_map);
FsaVec ans = IntersectDevice(
a_fsas, properties_a, b_fsas, properties_b, b_to_a_map_array,
need_arc_map ? &a_arc_map : nullptr,
need_arc_map ? &b_arc_map : nullptr, sorted_match_a);
torch::optional<torch::Tensor> a_tensor;
torch::optional<torch::Tensor> b_tensor;
if (need_arc_map) {
a_tensor = ToTorch(a_arc_map);
b_tensor = ToTorch(b_arc_map);
}
return std::make_tuple(ans, a_tensor, b_tensor);
},
py::arg("a_fsas"), py::arg("properties_a"), py::arg("b_fsas"),
py::arg("properties_b"), py::arg("b_to_a_map"),
py::arg("need_arc_map") = true, py::arg("sorted_match_a") = false);
}
static void PybindIntersectDensePruned(py::module &m) {
m.def(
"intersect_dense_pruned",
[](FsaVec &a_fsas, DenseFsaVec &b_fsas, float search_beam,
float output_beam, int32_t min_active_states,
int32_t max_active_states)
-> std::tuple<FsaVec, torch::Tensor, torch::Tensor> {
DeviceGuard guard(a_fsas.Context());
Array1<int32_t> arc_map_a;
Array1<int32_t> arc_map_b;
FsaVec out;
IntersectDensePruned(a_fsas, b_fsas, search_beam, output_beam,
min_active_states, max_active_states, &out,
&arc_map_a, &arc_map_b);
return std::make_tuple(out, ToTorch(arc_map_a), ToTorch(arc_map_b));
},
py::arg("a_fsas"), py::arg("b_fsas"), py::arg("search_beam"),
py::arg("output_beam"), py::arg("min_active_states"),
py::arg("max_active_states"));
}
static void PybindIntersectDense(py::module &m) {
m.def(
"intersect_dense",
[](FsaVec &a_fsas, DenseFsaVec &b_fsas,
torch::optional<torch::Tensor> a_to_b_map, float output_beam,
int32_t max_states, int32_t max_arcs)
-> std::tuple<FsaVec, torch::Tensor, torch::Tensor> {
DeviceGuard guard(a_fsas.Context());
Array1<int32_t> arc_map_a;
Array1<int32_t> arc_map_b;
FsaVec out;
// the following is in case a_fsas had 2 not 3 axes. It happens in some
// test code, and IntersectDense() used to support it.
FsaVec a_fsa_vec = FsaToFsaVec(a_fsas);
Array1<int32_t> a_to_b_map_array;
if (a_to_b_map.has_value()) {
a_to_b_map_array = FromTorch<int32_t>(a_to_b_map.value());
} else {
a_to_b_map_array = Arange(a_fsa_vec.Context(), 0, a_fsa_vec.Dim0());
}
IntersectDense(a_fsa_vec, b_fsas, &a_to_b_map_array, output_beam,
max_states, max_arcs, &out, &arc_map_a, &arc_map_b);
return std::make_tuple(out, ToTorch(arc_map_a), ToTorch(arc_map_b));
},
py::arg("a_fsas"), py::arg("b_fsas"), py::arg("a_to_b_map"),
py::arg("output_beam"), py::arg("max_states") = 15000000,
py::arg("max_arcs") = 1073741824 /* 2^30 */);
}
static void PybindConnect(py::module &m) {
m.def(
"connect",
[](Fsa &src, bool need_arc_map =
true) -> std::pair<Fsa, torch::optional<torch::Tensor>> {
DeviceGuard guard(src.Context());
Array1<int32_t> arc_map;
Fsa out;
Connect(src, &out, need_arc_map ? &arc_map : nullptr);
torch::optional<torch::Tensor> tensor;
if (need_arc_map) tensor = ToTorch(arc_map);
return std::make_pair(out, tensor);
},
py::arg("src"), py::arg("need_arc_map") = true);
}
static void PybindArcSort(py::module &m) {
m.def(
"arc_sort",
[](FsaOrVec &src, bool need_arc_map = true)
-> std::pair<FsaOrVec, torch::optional<torch::Tensor>> {
DeviceGuard guard(src.Context());
Array1<int32_t> arc_map;
FsaOrVec out;
ArcSort(src, &out, need_arc_map ? &arc_map : nullptr);
torch::optional<torch::Tensor> tensor;
if (need_arc_map) tensor = ToTorch(arc_map);
return std::make_pair(out, tensor);
},
py::arg("src"), py::arg("need_arc_map") = true);
}
static void PybindShortestPath(py::module &m) {
// returns a std::pair containing the following entries (listed in order):
// - FsaVec
// contains linear FSAs of the best path of every FSA
 // - best_path_arc_indexes
 // a ragged tensor of int32 containing the arc indexes of the best paths
m.def(
"shortest_path",
[](FsaVec &fsas,
torch::Tensor entering_arcs) -> std::pair<Fsa, RaggedAny> {
DeviceGuard guard(fsas.Context());
Array1<int32_t> entering_arcs_array = FromTorch<int32_t>(entering_arcs);
Ragged<int32_t> best_path_arc_indexes =
ShortestPath(fsas, entering_arcs_array);
FsaVec out = FsaVecFromArcIndexes(fsas, best_path_arc_indexes);
return std::make_pair(out, RaggedAny(best_path_arc_indexes.Generic()));
},
py::arg("fsas"), py::arg("entering_arcs"));
}
static void PybindAddEpsilonSelfLoops(py::module &m) {
// Return a pair containing:
// - FsaOrVec
// the output FSA
// - arc_map
// a 1-D torch::Tensor of dtype torch.int32;
// None if `need_arc_map` is false
m.def(
"add_epsilon_self_loops",
[](FsaOrVec &src, bool need_arc_map = true)
-> std::pair<FsaOrVec, torch::optional<torch::Tensor>> {
DeviceGuard guard(src.Context());
Array1<int32_t> arc_map;
FsaOrVec out;
AddEpsilonSelfLoops(src, &out, need_arc_map ? &arc_map : nullptr);
torch::optional<torch::Tensor> arc_map_tensor;
if (need_arc_map) arc_map_tensor = ToTorch(arc_map);
return std::make_pair(out, arc_map_tensor);
},
py::arg("src"), py::arg("need_arc_map") = true);
}
static void PybindUnion(py::module &m) {
m.def(
"union",
[](FsaVec &fsas, bool need_arc_map = true)
-> std::pair<Fsa, torch::optional<torch::Tensor>> {
DeviceGuard guard(fsas.Context());
Array1<int32_t> arc_map;
Fsa out = Union(fsas, need_arc_map ? &arc_map : nullptr);
torch::optional<torch::Tensor> arc_map_tensor;
if (need_arc_map) arc_map_tensor = ToTorch(arc_map);
return std::make_pair(out, arc_map_tensor);
},
py::arg("fsas"), py::arg("need_arc_map") = true);
}
static void PybindRemoveEpsilon(py::module &m) {
m.def(
"remove_epsilon_host",
[](FsaOrVec &src) -> std::pair<FsaOrVec, RaggedAny> {
DeviceGuard guard(src.Context());
FsaOrVec dest;
Ragged<int32_t> arc_map;
RemoveEpsilonHost(src, &dest, &arc_map);
return std::make_pair(dest, RaggedAny(arc_map.Generic()));
},
py::arg("src"));
m.def(
"remove_epsilon_device",
[](FsaOrVec &src) -> std::pair<FsaOrVec, RaggedAny> {
DeviceGuard guard(src.Context());
FsaOrVec dest;
Ragged<int32_t> arc_map;
RemoveEpsilonDevice(src, &dest, &arc_map);
return std::make_pair(dest, RaggedAny(arc_map.Generic()));
},
py::arg("src"));
m.def(
"remove_epsilon",
[](FsaOrVec &src, int32_t properties) -> std::pair<FsaOrVec, RaggedAny> {
DeviceGuard guard(src.Context());
FsaOrVec dest;
Ragged<int32_t> arc_map;
RemoveEpsilon(src, properties, &dest, &arc_map);
return std::make_pair(dest, RaggedAny(arc_map.Generic()));
},
py::arg("src"), py::arg("properties"));
m.def(
"remove_epsilon_and_add_self_loops",
[](FsaOrVec &src, int32_t properties) -> std::pair<FsaOrVec, RaggedAny> {
DeviceGuard guard(src.Context());
FsaOrVec dest;
Ragged<int32_t> arc_map;
RemoveEpsilonAndAddSelfLoops(src, properties, &dest, &arc_map);
return std::make_pair(dest, RaggedAny(arc_map.Generic()));
},
py::arg("src"), py::arg("properties"));
}
static void PybindDeterminize(py::module &m) {
py::enum_<DeterminizeWeightPushingType>(m, "DeterminizeWeightPushingType",
py::arithmetic())
.value("kTropicalWeightPushing",
DeterminizeWeightPushingType::kTropicalWeightPushing)
.value("kLogWeightPushing",
DeterminizeWeightPushingType::kLogWeightPushing)
.value("kNoWeightPushing",
DeterminizeWeightPushingType::kNoWeightPushing);
m.def(
"determinize",
[](FsaOrVec &src, DeterminizeWeightPushingType weight_pushing_type)
-> std::pair<FsaOrVec, RaggedAny> {
DeviceGuard guard(src.Context());
FsaOrVec dest;
Ragged<int32_t> arc_map;
Determinize(src, weight_pushing_type, &dest, &arc_map);
return std::make_pair(dest, RaggedAny(arc_map.Generic()));
},
py::arg("src"), py::arg("weight_pushing_type"));
}
static void PybindClosure(py::module &m) {
m.def(
"closure",
[](Fsa &src, bool need_arc_map =
true) -> std::pair<Fsa, torch::optional<torch::Tensor>> {
DeviceGuard guard(src.Context());
Array1<int32_t> arc_map;
Fsa out = Closure(src, need_arc_map ? &arc_map : nullptr);
torch::optional<torch::Tensor> arc_map_tensor;
if (need_arc_map) arc_map_tensor = ToTorch(arc_map);
return std::make_pair(out, arc_map_tensor);
},
py::arg("src"), py::arg("need_arc_map") = true);
}
static void PybindInvert(py::module &m) {
m.def(
"invert",
[](FsaOrVec &src, Ragged<int32_t> &src_aux_labels,
bool need_arc_map =
true) -> std::tuple<FsaOrVec, Ragged<int32_t>,
torch::optional<torch::Tensor>> {
DeviceGuard guard(src.Context());
FsaOrVec dest;
Ragged<int32_t> dest_aux_labels;
Array1<int32_t> arc_map;
Invert(src, src_aux_labels, &dest, &dest_aux_labels,
need_arc_map ? &arc_map : nullptr);
torch::optional<torch::Tensor> arc_map_tensor;
if (need_arc_map) arc_map_tensor = ToTorch(arc_map);
return std::make_tuple(dest, dest_aux_labels, arc_map_tensor);
},
py::arg("src"), py::arg("src_aux_labels"), py::arg("need_arc_map"));
}
static void PybindRemoveEpsilonSelfLoops(py::module &m) {
m.def(
"remove_epsilon_self_loops",
[](FsaOrVec &src, bool need_arc_map = true)
-> std::pair<FsaOrVec, torch::optional<torch::Tensor>> {
DeviceGuard guard(src.Context());
Array1<int32_t> arc_map;
FsaOrVec ans =
RemoveEpsilonSelfLoops(src, need_arc_map ? &arc_map : nullptr);
torch::optional<torch::Tensor> arc_map_tensor;
if (need_arc_map) arc_map_tensor = ToTorch(arc_map);
return std::make_pair(ans, arc_map_tensor);
},
py::arg("src"), py::arg("need_arc_map") = true);
}
static void PybindExpandArcs(py::module &m) {
// See doc-string below.
m.def(
"expand_arcs",
[](FsaOrVec &fsas, std::vector<RaggedAny> &ragged)
-> std::tuple<FsaOrVec, std::vector<torch::Tensor>, torch::Tensor> {
DeviceGuard guard(fsas.Context());
std::vector<Ragged<int32_t>> ragged_labels(ragged.size());
int32_t ragged_labels_size = ragged_labels.size();
for (int32_t i = 0; i != ragged_labels_size; ++i) {
ragged_labels[i] = ragged[i].any.Specialize<int32_t>();
}
K2_CHECK_NE(ragged_labels_size, 0);
K2_CHECK_LE(ragged_labels_size, 6); // see SmallVec<...,6> below.
ContextPtr c = fsas.Context();
int32_t num_arcs = fsas.NumElements();
SmallVec<int32_t *, 6> ragged_labels_row_splits, ragged_labels_data;
for (int32_t r = 0; r < ragged_labels_size; r++) {
K2_CHECK_EQ(ragged_labels[r].NumAxes(), 2);
K2_CHECK_EQ(ragged_labels[r].Dim0(), num_arcs);
ragged_labels_row_splits.data[r] =
ragged_labels[r].RowSplits(1).Data();
ragged_labels_data.data[r] = ragged_labels[r].values.Data();
}
// we'll be using the labels on the arcs of `fsas` (i.e. whether they
// are -1 or not) to determine whether arcs are final or not. The
// assumption is that `fsas` is valid.
const Arc *fsas_arcs = fsas.values.Data();
// will be set to the maximum of 1 and the length of the i'th sub-list
// of any of the lists in `ragged_labels` (for final-arcs where the last
// element of a sub-list was not -1, we imagine that there was an extra
// element of the sub-list with the value of -1).
Array1<int32_t> combined_size(c, num_arcs + 1);
int32_t *combined_size_data = combined_size.Data();
K2_EVAL(
c, num_arcs, lambda_get_combined_size, (int32_t arc_idx)->void {
int32_t fsa_label = fsas_arcs[arc_idx].label;
bool arc_is_final = (fsa_label == -1);
int32_t max_num_elems = 1;
for (int32_t r = 0; r < ragged_labels_size; r++) {
int32_t this_label_idx0x =
ragged_labels_row_splits.data[r][arc_idx],
next_label_idx0x =
ragged_labels_row_splits.data[r][arc_idx + 1];
int32_t size = next_label_idx0x - this_label_idx0x;
// Adds an extra place for the final-arc's -1 if this is a
// final-arc and the ragged label list did not have a -1 as its
// last element. We don't do a memory fetch until we know that
// it would make a difference to the result.
if (arc_is_final && size >= max_num_elems &&
ragged_labels_data.data[r][next_label_idx0x - 1] != -1)
max_num_elems = size + 1;
else if (size > max_num_elems)
max_num_elems = size;
}
combined_size_data[arc_idx] = max_num_elems;
});
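 // turn the per-arc sizes into row_splits with an exclusive prefix sum,
 // then build the ragged shape describing how many new arcs each
 // original arc expands into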
ExclusiveSum(combined_size, &combined_size);
RaggedShape combined_shape = RaggedShape2(&combined_size, nullptr, -1);
Array1<int32_t> fsas_arc_map, labels_arc_map;
FsaOrVec ans =
ExpandArcs(fsas, combined_shape, &fsas_arc_map, &labels_arc_map);
int32_t ans_num_arcs = ans.NumElements();
Array2<int32_t> labels(c, ragged_labels_size, ans_num_arcs);
auto labels_acc = labels.Accessor();
// we'll be using the labels on the returned arcs (i.e. whether they are
// -1 or not) to determine whether arcs are final or not. The
// assumption is that the answer is valid; since we'll likely be
// constructing an Fsa (i.e. a python-level Fsa) from it, the properties
// should be checked, so if this assumption is false we'll find out
// sooner or later.
const Arc *ans_arcs = ans.values.Data();
K2_CHECK_EQ(labels_arc_map.Dim(), ans_num_arcs);
const int32_t *labels_arc_map_data = labels_arc_map.Data();
const int32_t *combined_shape_row_ids_data =
combined_shape.RowIds(1).Data(),
*combined_shape_row_splits_data =
combined_shape.RowSplits(1).Data();
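 // for every arc of the expanded FSA, work out which original arc it
 // came from and its position within that arc's chain, then copy the
 // matching ragged label (or synthesize 0 / -1 for padding and
 // final-arcs)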
K2_EVAL2(
c, ragged_labels_size, ans_num_arcs, lambda_linearize_labels,
(int32_t r, int32_t arc_idx)->void {
int32_t fsa_label = ans_arcs[arc_idx].label;
bool arc_is_final = (fsa_label == -1);
int32_t combined_shape_idx01 = labels_arc_map_data[arc_idx];
// The reason we can assert the following is that `combined_size`
// has no empty sub-lists because we initialized `max_num_elems =
// 1` when we set up those sizes.
K2_CHECK_GE(combined_shape_idx01, 0);
// combined_shape_idx0 is also an arc_idx012 into the *original*
// fsas; combined_shape_idx1 is the index into the sequence of
// ragged labels attached to that arc.
int32_t combined_shape_idx0 =
combined_shape_row_ids_data[combined_shape_idx01],
combined_shape_idx0x =
combined_shape_row_splits_data[combined_shape_idx0],
combined_shape_idx1 =
combined_shape_idx01 - combined_shape_idx0x;
K2_CHECK_GE(combined_shape_idx1, 0);
int32_t src_idx0x =
ragged_labels_row_splits.data[r][combined_shape_idx0],
src_idx0x_next = ragged_labels_row_splits
.data[r][combined_shape_idx0 + 1],
src_idx01 = src_idx0x + combined_shape_idx1;
int32_t this_label;
if (src_idx01 >= src_idx0x_next) {
// We were past the end of the source sub-list of ragged labels.
this_label = 0;
} else {
this_label = ragged_labels_data.data[r][src_idx01];
}
if (this_label == -1 || this_label == 0)
this_label = (arc_is_final ? -1 : 0);
if (arc_is_final) {
// In positions where the source FSA has label -1 (which should
// be final-arcs), the ragged labels should have label -1. If
// this fails it will be because final-arcs had labels that were
// neither -1 or 0. If this becomes a problem in future we may
// have to revisit this.
K2_CHECK_EQ(this_label, fsa_label);
}
labels_acc(r, arc_idx) = this_label;
});
std::vector<torch::Tensor> ans_labels(ragged_labels_size);
for (int32_t r = 0; r < ragged_labels_size; r++) {
Array1<int32_t> labels_row = labels.Row(r);
ans_labels[r] = ToTorch(labels_row);
}
return std::make_tuple(ans, ans_labels, ToTorch(fsas_arc_map));
},
py::arg("fsas"), py::arg("ragged_labels"),
R"(
This function expands the arcs in an Fsa or FsaVec so that we can
turn a list of attributes stored as ragged tensors into normal, linear
tensors. It does this by expanding arcs into linear chains of arcs.
Args:
 fsas: The Fsa or FsaVec (ragged tensor of arcs with 2 or 3 axes)
 whose structure we want to copy, expanding arcs into chains of
 arcs where necessary
ragged_labels: A list of at least one ragged tensor of
ints; must satisfy ragged_labels[i].NumAxes() == 2
and ragged_labels[i].Dim0() == fsas.NumElements(),
i.e. one sub-list per arc in the input FSAs
Returns: A triplet (ans_fsas, ans_label_list, arc_map), where:
ans_fsas is the possibly-modified arcs,
ans_label_list is a list of torch::Tensor representing
the linearized form of `ragged_labels`
arc_map is the map from arcs in `ans_fsas` to arcs
in `fsas` where the score came from, or -1 in positions
for newly-created arcs
Caution: the behavior of this function w.r.t. final-arcs and -1's in ragged
labels is a little complicated. We ensure that in the linearized labels,
all final-arcs have a label of -1 (we turn final-arcs into longer sequences
if necessary to ensure this); and we ensure that no other arcs have -1's
(we turn -1's into 0 to ensure this).
)");
}
static void PybindFixFinalLabels(py::module &m) {
// See doc-string below.
m.def(
"fix_final_labels",
[](FsaOrVec &fsas, torch::optional<torch::Tensor> labels) -> void {
DeviceGuard guard(fsas.Context());
if (labels.has_value()) {
Array1<int32_t> labels_array = FromTorch<int32_t>(labels.value());
K2_CHECK_EQ(labels_array.Dim(), fsas.NumElements());
K2_CHECK(fsas.Context()->IsCompatible(*labels_array.Context()));
FixFinalLabels(fsas, labels_array.Data(), 1);
} else {
// `label` is the 3rd field of struct Arc.
FixFinalLabels(
fsas, reinterpret_cast<int32_t *>(fsas.values.Data()) + 2, 4);
}
},
py::arg("fsas"), py::arg("labels"),
R"(
This function modifies, in-place, labels attached to arcs, so
that they satisfy constraints on the placement of -1's: namely,
that arcs to final-states must have -1's as their label, and
that no other arcs can have -1 as their label.
fsas: the FSA whose labels we want to modify
 labels: if supplied, must be a tensor of int32 with shape
 equal to (fsas.NumElements(),), i.e. one entry per arc; in this
 case, these labels will be modified. If not supplied, the labels
 on the arcs of `fsas` will be modified.
)");
}
static void PybindReplaceFsa(py::module &m) {
m.def(
"replace_fsa",
[](FsaVec &src, FsaOrVec &index, int32_t symbol_begin_range)
-> std::tuple<FsaOrVec, torch::optional<torch::Tensor>,
torch::optional<torch::Tensor>> {
DeviceGuard guard(index.Context());
Array1<int32_t> arc_map_src, arc_map_index;
FsaOrVec out = ReplaceFsa(src, index, symbol_begin_range, &arc_map_src,
&arc_map_index);
torch::optional<torch::Tensor> src_map_tensor, index_map_tensor;
src_map_tensor = ToTorch(arc_map_src);
index_map_tensor = ToTorch(arc_map_index);
return std::make_tuple(out, src_map_tensor, index_map_tensor);
},
py::arg("src"), py::arg("index"), py::arg("symbol_begin_range"));
}
static void PybindCtcGraph(py::module &m) {
m.def(
"ctc_graph",
[](RaggedAny &symbols, bool modified = false)
-> std::pair<FsaVec, torch::Tensor> {
DeviceGuard guard(symbols.any.Context());
Array1<int32_t> aux_labels;
FsaVec graph = CtcGraphs(symbols.any.Specialize<int32_t>(), modified,
&aux_labels);
torch::Tensor tensor = ToTorch(aux_labels);
return std::make_pair(graph, tensor);
},
py::arg("symbols"), py::arg("modified") = false);
}
static void PybindCtcTopo(py::module &m) {
m.def(
"ctc_topo",
[](int32_t max_token, torch::optional<torch::Device> device = {},
bool modified = false) -> std::pair<Fsa, torch::Tensor> {
ContextPtr context = GetContext(device.value_or(torch::Device("cpu")));
DeviceGuard guard(context);
Array1<int32_t> aux_labels;
Fsa fsa = CtcTopo(context, max_token, modified, &aux_labels);
torch::Tensor tensor = ToTorch(aux_labels);
return std::make_pair(fsa, tensor);
},
py::arg("max_token"), py::arg("device") = py::none(),
py::arg("modified") = false);
m.def(
"ctc_topo",
[](int32_t max_token, torch::optional<std::string> device = {},
bool modified = false) -> std::pair<Fsa, torch::Tensor> {
ContextPtr context = GetContext(torch::Device(device.value_or("cpu")));
DeviceGuard guard(context);
Array1<int32_t> aux_labels;
Fsa fsa = CtcTopo(context, max_token, modified, &aux_labels);
torch::Tensor tensor = ToTorch(aux_labels);
return std::make_pair(fsa, tensor);
},
py::arg("max_token"), py::arg("device") = py::none(),
py::arg("modified") = false);
}
static void PybindLevenshteinGraph(py::module &m) {
m.def(
"levenshtein_graph",
[](RaggedAny &symbols, float ins_del_score = -0.501,
bool need_score_offset =
true) -> std::tuple<FsaVec, torch::Tensor,
torch::optional<torch::Tensor>> {
DeviceGuard guard(symbols.any.Context());
Array1<int32_t> aux_labels;
Array1<float> score_offsets;
FsaVec graph = LevenshteinGraphs(symbols.any.Specialize<int32_t>(),
ins_del_score, &aux_labels,
need_score_offset ? &score_offsets : nullptr);
torch::Tensor aux_labels_tensor = ToTorch(aux_labels);
torch::optional<torch::Tensor> score_offsets_tensor;
if (need_score_offset) score_offsets_tensor = ToTorch(score_offsets);
return std::make_tuple(graph, aux_labels_tensor, score_offsets_tensor);
},
py::arg("symbols"), py::arg("ins_del_score") = -0.501,
py::arg("need_score_offset") = true);
}
} // namespace k2
void PybindFsaAlgo(py::module &m) {
k2::PybindAddEpsilonSelfLoops(m);
k2::PybindArcSort(m);
k2::PybindClosure(m);
k2::PybindConnect(m);
k2::PybindCtcGraph(m);
k2::PybindCtcTopo(m);
k2::PybindDeterminize(m);
k2::PybindExpandArcs(m);
k2::PybindFixFinalLabels(m);
k2::PybindIntersect(m);
k2::PybindIntersectDense(m);
k2::PybindIntersectDensePruned(m);
k2::PybindIntersectDevice(m);
k2::PybindInvert(m);
k2::PybindLevenshteinGraph(m);
k2::PybindLinearFsa(m);
k2::PybindRemoveEpsilon(m);
k2::PybindRemoveEpsilonSelfLoops(m);
k2::PybindReplaceFsa(m);
k2::PybindShortestPath(m);
k2::PybindTopSort(m);
k2::PybindUnion(m);
}
namespace std {
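// component-wise specializations of std::numeric_limits for the CUDA
// built-in vector types: max(), min() and lowest() simply replicate the
// corresponding scalar limit in every component.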
inline char2 numeric_limits<char2>::max() noexcept {
return make_char2(std::numeric_limits<char>::max(),
std::numeric_limits<char>::max());
}
inline char2 numeric_limits<char2>::min() noexcept {
return make_char2(std::numeric_limits<char>::min(),
std::numeric_limits<char>::min());
}
inline char2 numeric_limits<char2>::lowest() noexcept {
return make_char2(std::numeric_limits<char>::lowest(),
std::numeric_limits<char>::lowest());
}
//------------------------------------------------------------------------------
inline uchar2 numeric_limits<uchar2>::max() noexcept {
return make_uchar2(std::numeric_limits<unsigned char>::max(),
std::numeric_limits<unsigned char>::max());
}
inline uchar2 numeric_limits<uchar2>::min() noexcept {
return make_uchar2(std::numeric_limits<unsigned char>::min(),
std::numeric_limits<unsigned char>::min());
}
inline uchar2 numeric_limits<uchar2>::lowest() noexcept {
return make_uchar2(std::numeric_limits<unsigned char>::lowest(),
std::numeric_limits<unsigned char>::lowest());
}
//------------------------------------------------------------------------------
inline char4 numeric_limits<char4>::max() noexcept {
return make_char4(std::numeric_limits<char>::max(),
std::numeric_limits<char>::max(),
std::numeric_limits<char>::max(),
std::numeric_limits<char>::max());
}
inline char4 numeric_limits<char4>::min() noexcept {
return make_char4(std::numeric_limits<char>::min(),
std::numeric_limits<char>::min(),
std::numeric_limits<char>::min(),
std::numeric_limits<char>::min());
}
inline char4 numeric_limits<char4>::lowest() noexcept {
return make_char4(std::numeric_limits<char>::lowest(),
std::numeric_limits<char>::lowest(),
std::numeric_limits<char>::lowest(),
std::numeric_limits<char>::lowest());
}
//------------------------------------------------------------------------------
inline uchar4 numeric_limits<uchar4>::max() noexcept {
return make_uchar4(std::numeric_limits<unsigned char>::max(),
std::numeric_limits<unsigned char>::max(),
std::numeric_limits<unsigned char>::max(),
std::numeric_limits<unsigned char>::max());
}
inline uchar4 numeric_limits<uchar4>::min() noexcept {
return make_uchar4(std::numeric_limits<unsigned char>::min(),
std::numeric_limits<unsigned char>::min(),
std::numeric_limits<unsigned char>::min(),
std::numeric_limits<unsigned char>::min());
}
inline uchar4 numeric_limits<uchar4>::lowest() noexcept {
return make_uchar4(std::numeric_limits<unsigned char>::lowest(),
std::numeric_limits<unsigned char>::lowest(),
std::numeric_limits<unsigned char>::lowest(),
std::numeric_limits<unsigned char>::lowest());
}
//------------------------------------------------------------------------------
inline short2 numeric_limits<short2>::max() noexcept {
return make_short2(std::numeric_limits<short>::max(),
std::numeric_limits<short>::max());
}
inline short2 numeric_limits<short2>::min() noexcept {
return make_short2(std::numeric_limits<short>::min(),
std::numeric_limits<short>::min());
}
inline short2 numeric_limits<short2>::lowest() noexcept {
return make_short2(std::numeric_limits<short>::lowest(),
std::numeric_limits<short>::lowest());
}
//------------------------------------------------------------------------------
inline ushort2 numeric_limits<ushort2>::max() noexcept {
return make_ushort2(std::numeric_limits<unsigned short>::max(),
std::numeric_limits<unsigned short>::max());
}
inline ushort2 numeric_limits<ushort2>::min() noexcept {
return make_ushort2(std::numeric_limits<unsigned short>::min(),
std::numeric_limits<unsigned short>::min());
}
inline ushort2 numeric_limits<ushort2>::lowest() noexcept {
return make_ushort2(std::numeric_limits<unsigned short>::lowest(),
std::numeric_limits<unsigned short>::lowest());
}
//------------------------------------------------------------------------------
inline short4 numeric_limits<short4>::max() noexcept {
return make_short4(std::numeric_limits<short>::max(),
std::numeric_limits<short>::max(),
std::numeric_limits<short>::max(),
std::numeric_limits<short>::max());
}
inline short4 numeric_limits<short4>::min() noexcept {
return make_short4(std::numeric_limits<short>::min(),
std::numeric_limits<short>::min(),
std::numeric_limits<short>::min(),
std::numeric_limits<short>::min());
}
inline short4 numeric_limits<short4>::lowest() noexcept {
return make_short4(std::numeric_limits<short>::lowest(),
std::numeric_limits<short>::lowest(),
std::numeric_limits<short>::lowest(),
std::numeric_limits<short>::lowest());
}
//------------------------------------------------------------------------------
inline ushort4 numeric_limits<ushort4>::max() noexcept {
return make_ushort4(std::numeric_limits<unsigned short>::max(),
std::numeric_limits<unsigned short>::max(),
std::numeric_limits<unsigned short>::max(),
std::numeric_limits<unsigned short>::max());
}
inline ushort4 numeric_limits<ushort4>::min() noexcept {
return make_ushort4(std::numeric_limits<unsigned short>::min(),
std::numeric_limits<unsigned short>::min(),
std::numeric_limits<unsigned short>::min(),
std::numeric_limits<unsigned short>::min());
}
inline ushort4 numeric_limits<ushort4>::lowest() noexcept {
return make_ushort4(std::numeric_limits<unsigned short>::lowest(),
std::numeric_limits<unsigned short>::lowest(),
std::numeric_limits<unsigned short>::lowest(),
std::numeric_limits<unsigned short>::lowest());
}
//------------------------------------------------------------------------------
inline int2 numeric_limits<int2>::max() noexcept {
return make_int2(std::numeric_limits<int>::max(),
std::numeric_limits<int>::max());
}
inline int2 numeric_limits<int2>::min() noexcept {
return make_int2(std::numeric_limits<int>::min(),
std::numeric_limits<int>::min());
}
inline int2 numeric_limits<int2>::lowest() noexcept {
return make_int2(std::numeric_limits<int>::lowest(),
std::numeric_limits<int>::lowest());
}
//------------------------------------------------------------------------------
inline uint2 numeric_limits<uint2>::max() noexcept {
return make_uint2(std::numeric_limits<unsigned>::max(),
std::numeric_limits<unsigned>::max());
}
inline uint2 numeric_limits<uint2>::min() noexcept {
return make_uint2(std::numeric_limits<unsigned>::min(),
std::numeric_limits<unsigned>::min());
}
inline uint2 numeric_limits<uint2>::lowest() noexcept {
return make_uint2(std::numeric_limits<unsigned>::lowest(),
std::numeric_limits<unsigned>::lowest());
}
//------------------------------------------------------------------------------
inline int4 numeric_limits<int4>::max() noexcept {
return make_int4(std::numeric_limits<int>::max(),
std::numeric_limits<int>::max(),
std::numeric_limits<int>::max(),
std::numeric_limits<int>::max());
}
inline int4 numeric_limits<int4>::min() noexcept {
return make_int4(std::numeric_limits<int>::min(),
std::numeric_limits<int>::min(),
std::numeric_limits<int>::min(),
std::numeric_limits<int>::min());
}
inline int4 numeric_limits<int4>::lowest() noexcept {
return make_int4(std::numeric_limits<int>::lowest(),
std::numeric_limits<int>::lowest(),
std::numeric_limits<int>::lowest(),
std::numeric_limits<int>::lowest());
}
//------------------------------------------------------------------------------
inline uint4 numeric_limits<uint4>::max() noexcept {
return make_uint4(std::numeric_limits<unsigned>::max(),
std::numeric_limits<unsigned>::max(),
std::numeric_limits<unsigned>::max(),
std::numeric_limits<unsigned>::max());
}
inline uint4 numeric_limits<uint4>::min() noexcept {
return make_uint4(std::numeric_limits<unsigned>::min(),
std::numeric_limits<unsigned>::min(),
std::numeric_limits<unsigned>::min(),
std::numeric_limits<unsigned>::min());
}
inline uint4 numeric_limits<uint4>::lowest() noexcept {
return make_uint4(std::numeric_limits<unsigned>::lowest(),
std::numeric_limits<unsigned>::lowest(),
std::numeric_limits<unsigned>::lowest(),
std::numeric_limits<unsigned>::lowest());
}
//------------------------------------------------------------------------------
inline longlong2 numeric_limits<longlong2>::max() noexcept {
return make_longlong2(std::numeric_limits<long long>::max(),
std::numeric_limits<long long>::max());
}
inline longlong2 numeric_limits<longlong2>::min() noexcept {
return make_longlong2(std::numeric_limits<long long>::min(),
std::numeric_limits<long long>::min());
}
inline longlong2 numeric_limits<longlong2>::lowest() noexcept {
return make_longlong2(std::numeric_limits<long long>::lowest(),
std::numeric_limits<long long>::lowest());
}
//------------------------------------------------------------------------------
inline ulonglong2 numeric_limits<ulonglong2>::max() noexcept {
return make_ulonglong2(std::numeric_limits<long long unsigned>::max(),
std::numeric_limits<long long unsigned>::max());
}
inline ulonglong2 numeric_limits<ulonglong2>::min() noexcept {
return make_ulonglong2(std::numeric_limits<long long unsigned>::min(),
std::numeric_limits<long long unsigned>::min());
}
inline ulonglong2 numeric_limits<ulonglong2>::lowest() noexcept {
return make_ulonglong2(std::numeric_limits<long long unsigned>::lowest(),
std::numeric_limits<long long unsigned>::lowest());
}
//------------------------------------------------------------------------------
inline float2 numeric_limits<float2>::max() noexcept {
return make_float2(std::numeric_limits<float>::max(),
std::numeric_limits<float>::max());
}
inline float2 numeric_limits<float2>::min() noexcept {
return make_float2(std::numeric_limits<float>::min(),
std::numeric_limits<float>::min());
}
inline float2 numeric_limits<float2>::lowest() noexcept {
return make_float2(std::numeric_limits<float>::lowest(),
std::numeric_limits<float>::lowest());
}
//------------------------------------------------------------------------------
inline float4 numeric_limits<float4>::max() noexcept {
return make_float4(std::numeric_limits<float>::max(),
std::numeric_limits<float>::max(),
std::numeric_limits<float>::max(),
std::numeric_limits<float>::max());
}
inline float4 numeric_limits<float4>::min() noexcept {
return make_float4(std::numeric_limits<float>::min(),
std::numeric_limits<float>::min(),
std::numeric_limits<float>::min(),
std::numeric_limits<float>::min());
}
inline float4 numeric_limits<float4>::lowest() noexcept {
return make_float4(std::numeric_limits<float>::lowest(),
std::numeric_limits<float>::lowest(),
std::numeric_limits<float>::lowest(),
std::numeric_limits<float>::lowest());
}
//------------------------------------------------------------------------------
inline double2 numeric_limits<double2>::max() noexcept {
return make_double2(std::numeric_limits<double>::max(),
std::numeric_limits<double>::max());
}
inline double2 numeric_limits<double2>::min() noexcept {
return make_double2(std::numeric_limits<double>::min(),
std::numeric_limits<double>::min());
}
inline double2 numeric_limits<double2>::lowest() noexcept {
return make_double2(std::numeric_limits<double>::lowest(),
std::numeric_limits<double>::lowest());
}
} // namespace std
//==============================================================================
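// stream output for the CUDA built-in vector types: 2-component values are
// printed as (x,y), 4-component values as (x,y,z,w)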
inline std::ostream& operator<< (std::ostream& out, const char2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const uchar2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const char4& value) {
out << "(" << value.x << "," << value.y << "," << value.z << ","
<< value.w << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const uchar4& value) {
out << "(" << value.x << "," << value.y << "," << value.z << ","
<< value.w << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const short2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const ushort2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const short4& value) {
out << "(" << value.x << "," << value.y << "," << value.z << ","
<< value.w << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const ushort4& value) {
out << "(" << value.x << "," << value.y << "," << value.z << ","
<< value.w << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const int2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const uint2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const int4& value) {
out << "(" << value.x << "," << value.y << "," << value.z << ","
<< value.w << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const uint4& value) {
out << "(" << value.x << "," << value.y << "," << value.z << ","
<< value.w << ")";
return out;
}
inline std::ostream& operator<<(std::ostream& out, const longlong2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
inline std::ostream& operator<<(std::ostream& out, const ulonglong2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
inline std::ostream& operator << (std::ostream& out, const float2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
inline std::ostream& operator<< (std::ostream& out, const float4& value) {
out << "(" << value.x << "," << value.y << "," << value.z << ","
<< value.w << ")";
return out;
}
inline std::ostream& operator << (std::ostream& out, const double2& value) {
out << "(" << value.x << "," << value.y << ")";
return out;
}
//==============================================================================
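// component-wise comparisons for the 2-component vector types: operator< and
// operator> are lexicographic (x first, then y), while operator<= and
// operator>= require the relation to hold in every component, so they are not
// simply the negations of operator> and operator<.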
HOST_DEVICE bool operator== (const char2& A, const char2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const char2& A, const char2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const char2& A, const char2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const char2& A, const char2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const char2& A, const char2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const char2& A, const char2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const uchar2& A, const uchar2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const uchar2& A, const uchar2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const uchar2& A, const uchar2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const uchar2& A, const uchar2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const uchar2& A, const uchar2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const uchar2& A, const uchar2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const short2& A, const short2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const short2& A, const short2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const short2& A, const short2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const short2& A, const short2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const short2& A, const short2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const short2& A, const short2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const ushort2& A, const ushort2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const ushort2& A, const ushort2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const ushort2& A, const ushort2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const ushort2& A, const ushort2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const ushort2& A, const ushort2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const ushort2& A, const ushort2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const int2& A, const int2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const int2& A, const int2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const int2& A, const int2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const int2& A, const int2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const int2& A, const int2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const int2& A, const int2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const uint2& A, const uint2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const uint2& A, const uint2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const uint2& A, const uint2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const uint2& A, const uint2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const uint2& A, const uint2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const uint2& A, const uint2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const float2& A, const float2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const float2& A, const float2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const float2& A, const float2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const float2& A, const float2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const float2& A, const float2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const float2& A, const float2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const longlong2& A, const longlong2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const longlong2& A, const longlong2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const longlong2& A, const longlong2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const longlong2& A, const longlong2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const longlong2& A, const longlong2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const longlong2& A, const longlong2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const ulonglong2& A, const ulonglong2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const ulonglong2& A, const ulonglong2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const ulonglong2& A, const ulonglong2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const ulonglong2& A, const ulonglong2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const ulonglong2& A, const ulonglong2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const ulonglong2& A, const ulonglong2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const double2& A, const double2& B) {
return A.x == B.x && A.y == B.y;
}
HOST_DEVICE bool operator!= (const double2& A, const double2& B) {
return A.x != B.x || A.y != B.y;
}
HOST_DEVICE bool operator< (const double2& A, const double2& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y);
}
HOST_DEVICE bool operator<= (const double2& A, const double2& B) {
return A.x <= B.x && A.y <= B.y;
}
HOST_DEVICE bool operator>= (const double2& A, const double2& B) {
return A.x >= B.x && A.y >= B.y;
}
HOST_DEVICE bool operator> (const double2& A, const double2& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y);
}
//------------------------------------------------------------------------------
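// Note: for the 4-component types below, operator< and operator> only chain
// adjacent components (x/y, y/z, z/w) and do not require the earlier
// components to be equal, so they are weaker than a strict lexicographic
// ordering.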
HOST_DEVICE bool operator== (const char4& A, const char4& B) {
return A.x == B.x && A.y == B.y && A.z == B.z && A.w == B.w;
}
HOST_DEVICE bool operator!= (const char4& A, const char4& B) {
return A.x != B.x || A.y != B.y || A.z != B.z || A.w != B.w;
}
HOST_DEVICE bool operator< (const char4& A, const char4& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y) ||
(A.y == B.y && A.z < B.z) ||
(A.z == B.z && A.w < B.w);
}
HOST_DEVICE bool operator<= (const char4& A, const char4& B) {
return A.x <= B.x && A.y <= B.y && A.z <= B.z && A.w <= B.w;
}
HOST_DEVICE bool operator>= (const char4& A, const char4& B) {
return A.x >= B.x && A.y >= B.y && A.z >= B.z && A.w >= B.w;
}
HOST_DEVICE bool operator> (const char4& A, const char4& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y)
|| (A.y == B.y && A.z > B.z)
|| (A.z == B.z && A.w > B.w);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const uchar4& A, const uchar4& B) {
return A.x == B.x && A.y == B.y && A.z == B.z && A.w == B.w;
}
HOST_DEVICE bool operator!= (const uchar4& A, const uchar4& B) {
return A.x != B.x || A.y != B.y || A.z != B.z || A.w != B.w;
}
HOST_DEVICE bool operator< (const uchar4& A, const uchar4& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y) ||
(A.y == B.y && A.z < B.z) ||
(A.z == B.z && A.w < B.w);
}
HOST_DEVICE bool operator<= (const uchar4& A, const uchar4& B) {
return A.x <= B.x && A.y <= B.y && A.z <= B.z && A.w <= B.w;
}
HOST_DEVICE bool operator>= (const uchar4& A, const uchar4& B) {
return A.x >= B.x && A.y >= B.y && A.z >= B.z && A.w >= B.w;
}
HOST_DEVICE bool operator> (const uchar4& A, const uchar4& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y)
|| (A.y == B.y && A.z > B.z)
|| (A.z == B.z && A.w > B.w);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const short4& A, const short4& B) {
return A.x == B.x && A.y == B.y && A.z == B.z && A.w == B.w;
}
HOST_DEVICE bool operator!= (const short4& A, const short4& B) {
return A.x != B.x || A.y != B.y || A.z != B.z || A.w != B.w;
}
HOST_DEVICE bool operator< (const short4& A, const short4& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y) ||
(A.y == B.y && A.z < B.z) ||
(A.z == B.z && A.w < B.w);
}
HOST_DEVICE bool operator<= (const short4& A, const short4& B) {
return A.x <= B.x && A.y <= B.y && A.z <= B.z && A.w <= B.w;
}
HOST_DEVICE bool operator>= (const short4& A, const short4& B) {
return A.x >= B.x && A.y >= B.y && A.z >= B.z && A.w >= B.w;
}
HOST_DEVICE bool operator> (const short4& A, const short4& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y)
|| (A.y == B.y && A.z > B.z)
|| (A.z == B.z && A.w > B.w);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const ushort4& A, const ushort4& B) {
return A.x == B.x && A.y == B.y && A.z == B.z && A.w == B.w;
}
HOST_DEVICE bool operator!= (const ushort4& A, const ushort4& B) {
return A.x != B.x || A.y != B.y || A.z != B.z || A.w != B.w;
}
HOST_DEVICE bool operator< (const ushort4& A, const ushort4& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y) ||
(A.y == B.y && A.z < B.z) ||
(A.z == B.z && A.w < B.w);
}
HOST_DEVICE bool operator<= (const ushort4& A, const ushort4& B) {
return A.x <= B.x && A.y <= B.y && A.z <= B.z && A.w <= B.w;
}
HOST_DEVICE bool operator>= (const ushort4& A, const ushort4& B) {
return A.x >= B.x && A.y >= B.y && A.z >= B.z && A.w >= B.w;
}
HOST_DEVICE bool operator> (const ushort4& A, const ushort4& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y)
|| (A.y == B.y && A.z > B.z)
|| (A.z == B.z && A.w > B.w);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const int4& A, const int4& B) {
return A.x == B.x && A.y == B.y && A.z == B.z && A.w == B.w;
}
HOST_DEVICE bool operator!= (const int4& A, const int4& B) {
return A.x != B.x || A.y != B.y || A.z != B.z || A.w != B.w;
}
HOST_DEVICE bool operator< (const int4& A, const int4& B) {
return A.x < B.x || (A.x == B.x && A.y < B.y) ||
(A.y == B.y && A.z < B.z) ||
(A.z == B.z && A.w < B.w);
}
HOST_DEVICE bool operator<= (const int4& A, const int4& B) {
return A.x <= B.x && A.y <= B.y && A.z <= B.z && A.w <= B.w;
}
HOST_DEVICE bool operator>= (const int4& A, const int4& B) {
return A.x >= B.x && A.y >= B.y && A.z >= B.z && A.w >= B.w;
}
HOST_DEVICE bool operator> (const int4& A, const int4& B) {
return A.x > B.x || (A.x == B.x && A.y > B.y)
|| (A.y == B.y && A.z > B.z)
|| (A.z == B.z && A.w > B.w);
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const uint4& A, const uint4& B) {
return A.x == B.x && A.y == B.y && A.z == B.z && A.w == B.w;
}
HOST_DEVICE bool operator!= (const uint4& A, const uint4& B) {
return A.x != B.x || A.y != B.y || A.z != B.z || A.w != B.w;
}
HOST_DEVICE bool operator< (const uint4& A, const uint4& B) {
    return A.x < B.x || (A.x == B.x &&
           (A.y < B.y || (A.y == B.y &&
           (A.z < B.z || (A.z == B.z && A.w < B.w)))));
}
HOST_DEVICE bool operator<= (const uint4& A, const uint4& B) {
return A.x <= B.x && A.y <= B.y && A.z <= B.z && A.w <= B.w;
}
HOST_DEVICE bool operator>= (const uint4& A, const uint4& B) {
return A.x >= B.x && A.y >= B.y && A.z >= B.z && A.w >= B.w;
}
HOST_DEVICE bool operator> (const uint4& A, const uint4& B) {
    return A.x > B.x || (A.x == B.x &&
           (A.y > B.y || (A.y == B.y &&
           (A.z > B.z || (A.z == B.z && A.w > B.w)))));
}
//------------------------------------------------------------------------------
HOST_DEVICE bool operator== (const float4& A, const float4& B) {
return A.x == B.x && A.y == B.y && A.z == B.z && A.w == B.w;
}
HOST_DEVICE bool operator!= (const float4& A, const float4& B) {
return A.x != B.x || A.y != B.y || A.z != B.z || A.w != B.w;
}
HOST_DEVICE bool operator< (const float4& A, const float4& B) {
    return A.x < B.x || (A.x == B.x &&
           (A.y < B.y || (A.y == B.y &&
           (A.z < B.z || (A.z == B.z && A.w < B.w)))));
}
HOST_DEVICE bool operator<= (const float4& A, const float4& B) {
return A.x <= B.x && A.y <= B.y && A.z <= B.z && A.w <= B.w;
}
HOST_DEVICE bool operator>= (const float4& A, const float4& B) {
return A.x >= B.x && A.y >= B.y && A.z >= B.z && A.w >= B.w;
}
HOST_DEVICE bool operator> (const float4& A, const float4& B) {
    return A.x > B.x || (A.x == B.x &&
           (A.y > B.y || (A.y == B.y &&
           (A.z > B.z || (A.z == B.z && A.w > B.w)))));
}
// =============================================================================
namespace xlib {
template<>
struct Make2Str<char> {
using type = char2;
__host__ __device__ __forceinline__
static type get(char a, char b) {
return make_char2(a, b);
}
};
template<>
struct Make2Str<unsigned char> {
using type = uchar2;
__host__ __device__ __forceinline__
static type get(unsigned char a, unsigned char b) {
return make_uchar2(a, b);
}
};
template<>
struct Make2Str<short> {
using type = short2;
__host__ __device__ __forceinline__
static type get(short a, short b) {
return make_short2(a, b);
}
};
template<>
struct Make2Str<unsigned short> {
using type = ushort2;
__host__ __device__ __forceinline__
static type get(unsigned short a, unsigned short b) {
return make_ushort2(a, b);
}
};
template<>
struct Make2Str<int> {
using type = int2;
__host__ __device__ __forceinline__
static type get(int a, int b) {
return make_int2(a, b);
}
};
template<>
struct Make2Str<unsigned> {
    using type = uint2;
__host__ __device__ __forceinline__
static type get(unsigned a, unsigned b) {
        return make_uint2(a, b);
}
};
template<>
struct Make2Str<long long> {
using type = longlong2;
__host__ __device__ __forceinline__
static type get(long long a, long long b) {
return make_longlong2(a, b);
}
};
template<>
struct Make2Str<long long unsigned> {
using type = ulonglong2;
__host__ __device__ __forceinline__
static type get(long long unsigned a, long long unsigned b) {
return make_ulonglong2(a, b);
}
};
template<>
struct Make2Str<float> {
using type = float2;
__host__ __device__ __forceinline__
static type get(float a, float b) {
return make_float2(a, b);
}
};
template<> struct Make2Str<double> {
using type = double2;
__host__ __device__ __forceinline__
static type get(double a, double b) {
return make_double2(a, b);
}
};
//------------------------------------------------------------------------------
template<>
struct Make4Str<char> {
using type = char4;
__host__ __device__ __forceinline__
static type get(char a, char b, char c, char d) {
return make_char4(a, b, c, d);
}
};
template<>
struct Make4Str<unsigned char> {
using type = uchar4;
__host__ __device__ __forceinline__
static type get(unsigned char a, unsigned char b,
unsigned char c, unsigned char d) {
return make_uchar4(a, b, c, d);
}
};
template<>
struct Make4Str<short> {
using type = short4;
__host__ __device__ __forceinline__
static type get(short a, short b, short c, short d) {
return make_short4(a, b, c, d);
}
};
template<>
struct Make4Str<unsigned short> {
using type = ushort4;
__host__ __device__ __forceinline__
static type get(unsigned short a, unsigned short b,
unsigned short c, unsigned short d) {
return make_ushort4(a, b, c, d);
}
};
template<>
struct Make4Str<int> {
using type = int4;
__host__ __device__ __forceinline__
static type get(int a, int b, int c, int d) {
return make_int4(a, b, c, d);
}
};
template<>
struct Make4Str<unsigned> {
using type = uint4;
__host__ __device__ __forceinline__
static type get(unsigned a, unsigned b, unsigned c, unsigned d) {
return make_uint4(a, b, c, d);
}
};
template<>
struct Make4Str<float> {
using type = float4;
__host__ __device__ __forceinline__
static type get(float a, float b, float c, float d) {
return make_float4(a, b, c, d);
}
};
//==============================================================================
template<typename T>
__host__ __device__ __forceinline__
typename Make2Str<T>::type make2(T a, T b) {
return Make2Str<T>::get(a, b);
}
template<typename T>
__host__ __device__ __forceinline__
typename Make4Str<T>::type make4(T a, T b, T c, T d) {
return Make4Str<T>::get(a, b, c, d);
}
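// Illustrative usage sketch: splat4 is a hypothetical helper (not part of the
// original xlib headers) showing how the Make2Str/Make4Str traits let generic
// code build the matching CUDA vector type from scalars without spelling out
// make_int4 / make_float4 / ... explicitly. It assumes T is one of the scalar
// types specialized above.
template<typename T>
__host__ __device__ __forceinline__
typename Make4Str<T>::type splat4(T v) {
    return make4(v, v, v, v);   // e.g. splat4(3) yields make_int4(3, 3, 3, 3)
}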
} // namespace xlib
// order: (shouldFlipX, shouldFlipY, shouldFlipZ)
DEVICE uint8 next_child(uint8 order, uint8 mask)
{
for (uint8 child = 0; child < 8; ++child)
{
uint8 childInOrder = child ^ order;
if (mask & (1u << childInOrder))
return childInOrder;
}
check(false);
return 0;
}
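// NOTE: `order` packs the sign of the ray direction per axis (bit 2 = x,
//       bit 1 = y, bit 0 = z; a bit is set when that component is negative,
//       see rayChildOrder below). XOR-ing the loop counter with `order`
//       enumerates the octants in an approximate front-to-back order along
//       the ray; e.g. with order = 0b100 the children are tried as
//       4,5,6,7,0,1,2,3, so the +x half (entered first by a ray travelling
//       towards -x) comes first. The first candidate present in `mask` wins.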
template<bool isRoot, typename TDAG>
DEVICE uint8 compute_intersection_mask(
uint32 level,
const Path& path,
const TDAG& dag,
const float3& rayOrigin,
const float3& rayDirection,
const float3& rayDirectionInverted)
{
// Find node center = .5 * (boundsMin + boundsMax) + .5f
const uint32 shift = dag.levels - level;
const float radius = float(1u << (shift - 1));
const float3 center = make_float3(radius) + path.as_position(shift);
const float3 centerRelativeToRay = center - rayOrigin;
// Ray intersection with axis-aligned planes centered on the node
// => rayOrg + tmid * rayDir = center
const float3 tmid = centerRelativeToRay * rayDirectionInverted;
// t-values for where the ray intersects the slabs centered on the node
// and extending to the side of the node
float tmin, tmax;
{
const float3 slabRadius = radius * abs(rayDirectionInverted);
const float3 pmin = tmid - slabRadius;
tmin = max(max(pmin), .0f);
const float3 pmax = tmid + slabRadius;
tmax = min(pmax);
}
// Check if we actually hit the root node
    // This test may not be entirely safe due to float precision issues,
    // especially on lower levels. For the root node this seems OK, though.
if (isRoot && (tmin >= tmax))
{
return 0;
}
// Identify first child that is intersected
// NOTE: We assume that we WILL hit one child, since we assume that the
    //       parent's bounding box is hit.
// NOTE: To safely get the correct node, we cannot use o+ray_tmin*d as the
// intersection point, since this point might lie too close to an
// axis plane. Instead, we use the midpoint between max and min which
// will lie in the correct node IF the ray only intersects one node.
// Otherwise, it will still lie in an intersected node, so there are
// no false positives from this.
uint8 intersectionMask = 0;
{
const float3 pointOnRay = (0.5f * (tmin + tmax)) * rayDirection;
uint8 const firstChild =
((pointOnRay.x >= centerRelativeToRay.x) ? 4 : 0) +
((pointOnRay.y >= centerRelativeToRay.y) ? 2 : 0) +
((pointOnRay.z >= centerRelativeToRay.z) ? 1 : 0);
intersectionMask |= (1u << firstChild);
}
// We now check the points where the ray intersects the X, Y and Z plane.
// If the intersection is within (ray_tmin, ray_tmax) then the intersection
// point implies that two voxels will be touched by the ray. We find out
// which voxels to mask for an intersection point at +X, +Y by setting
// ALL voxels at +X and ALL voxels at +Y and ANDing these two masks.
//
// NOTE: When the intersection point is close enough to another axis plane,
// we must check both sides or we will get robustness issues.
const float epsilon = 1e-4f;
if (tmin <= tmid.x && tmid.x <= tmax)
{
const float3 pointOnRay = tmid.x * rayDirection;
uint8 A = 0;
if (pointOnRay.y >= centerRelativeToRay.y - epsilon) A |= 0xCC;
if (pointOnRay.y <= centerRelativeToRay.y + epsilon) A |= 0x33;
uint8 B = 0;
if (pointOnRay.z >= centerRelativeToRay.z - epsilon) B |= 0xAA;
if (pointOnRay.z <= centerRelativeToRay.z + epsilon) B |= 0x55;
intersectionMask |= A & B;
}
if (tmin <= tmid.y && tmid.y <= tmax)
{
const float3 pointOnRay = tmid.y * rayDirection;
uint8 C = 0;
if (pointOnRay.x >= centerRelativeToRay.x - epsilon) C |= 0xF0;
if (pointOnRay.x <= centerRelativeToRay.x + epsilon) C |= 0x0F;
uint8 D = 0;
if (pointOnRay.z >= centerRelativeToRay.z - epsilon) D |= 0xAA;
if (pointOnRay.z <= centerRelativeToRay.z + epsilon) D |= 0x55;
intersectionMask |= C & D;
}
if (tmin <= tmid.z && tmid.z <= tmax)
{
const float3 pointOnRay = tmid.z * rayDirection;
uint8 E = 0;
if (pointOnRay.x >= centerRelativeToRay.x - epsilon) E |= 0xF0;
if (pointOnRay.x <= centerRelativeToRay.x + epsilon) E |= 0x0F;
uint8 F = 0;
if (pointOnRay.y >= centerRelativeToRay.y - epsilon) F |= 0xCC;
if (pointOnRay.y <= centerRelativeToRay.y + epsilon) F |= 0x33;
intersectionMask |= E & F;
}
return intersectionMask;
}
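// Illustrative sketch (half_mask is a hypothetical helper, not part of the
// original tracer): children are indexed as (x << 2) | (y << 1) | z, which is
// where the half-space masks used above come from: +x -> 0xF0, +y -> 0xCC,
// +z -> 0xAA, with complements 0x0F / 0x33 / 0x55. A C++14 constexpr helper
// deriving such a half-mask from that bit layout could look like this
// (assumes the project's uint8/uint32 typedefs are the usual fixed-width ints):
constexpr uint8 half_mask(uint32 axisBit /* 2 = x, 1 = y, 0 = z */, bool upperHalf)
{
    uint8 mask = 0;
    for (uint8 child = 0; child < 8; ++child)
        if (((child >> axisBit) & 1u) == (upperHalf ? 1u : 0u))
            mask |= uint8(1u << child);
    return mask; // half_mask(2, true) == 0xF0, half_mask(1, true) == 0xCC, ...
}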
struct StackEntry
{
uint32 index;
uint8 childMask;
uint8 visitMask;
};
template<typename TDAG>
__global__ void Tracer::trace_paths(const TracePathsParams traceParams, const TDAG dag)
{
// Target pixel coordinate
const uint2 pixel = make_uint2(
blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (pixel.x >= imageWidth || pixel.y >= imageHeight)
return; // outside.
// Pre-calculate per-pixel data
const float3 rayOrigin = make_float3(traceParams.cameraPosition);
const float3 rayDirection = make_float3(normalize(traceParams.rayMin + pixel.x * traceParams.rayDDx + pixel.y * traceParams.rayDDy - traceParams.cameraPosition));
const float3 rayDirectionInverse = make_float3(make_double3(1. / rayDirection.x, 1. / rayDirection.y, 1. / rayDirection.z));
const uint8 rayChildOrder =
(rayDirection.x < 0.f ? 4 : 0) +
(rayDirection.y < 0.f ? 2 : 0) +
(rayDirection.z < 0.f ? 1 : 0);
// State
uint32 level = 0;
Path path(0, 0, 0);
StackEntry stack[MAX_LEVELS];
StackEntry cache;
Leaf cachedLeaf; // needed to iterate on the last few levels
cache.index = dag.get_first_node_index();
cache.childMask = Utils::child_mask(dag.get_node(0, cache.index));
cache.visitMask = cache.childMask & compute_intersection_mask<true>(0, path, dag, rayOrigin, rayDirection, rayDirectionInverse);
// Traverse DAG
for (;;)
{
// Ascend if there are no children left.
{
uint32 newLevel = level;
while (newLevel > 0 && !cache.visitMask)
{
newLevel--;
cache = stack[newLevel];
}
if (newLevel == 0 && !cache.visitMask)
{
path = Path(0, 0, 0);
break;
}
path.ascend(level - newLevel);
level = newLevel;
}
// Find next child in order by the current ray's direction
const uint8 nextChild = next_child(rayChildOrder, cache.visitMask);
// Mark it as handled
cache.visitMask &= ~(1u << nextChild);
// Intersect that child with the ray
{
path.descend(nextChild);
stack[level] = cache;
level++;
// If we're at the final level, we have intersected a single voxel.
if (level == dag.levels)
{
break;
}
// Are we in an internal node?
if (level < dag.leaf_level())
{
cache.index = dag.get_child_index(level - 1, cache.index, cache.childMask, nextChild);
cache.childMask = Utils::child_mask(dag.get_node(level, cache.index));
cache.visitMask = cache.childMask & compute_intersection_mask<false>(level, path, dag, rayOrigin, rayDirection, rayDirectionInverse);
}
else
{
/* The second-to-last and last levels are different: the data
* of these two levels (2^3 voxels) are packed densely into a
* single 64-bit word.
*/
uint8 childMask;
if (level == dag.leaf_level())
{
const uint32 addr = dag.get_child_index(level - 1, cache.index, cache.childMask, nextChild);
cachedLeaf = dag.get_leaf(addr);
childMask = cachedLeaf.get_first_child_mask();
}
else
{
childMask = cachedLeaf.get_second_child_mask(nextChild);
}
// No need to set the index for bottom nodes
cache.childMask = childMask;
cache.visitMask = cache.childMask & compute_intersection_mask<false>(level, path, dag, rayOrigin, rayDirection, rayDirectionInverse);
}
}
}
path.store(pixel.x, imageHeight - 1 - pixel.y, traceParams.pathsSurface);
}
template<typename TDAG, typename TDAGColors>
__global__ void Tracer::trace_colors(const TraceColorsParams traceParams, const TDAG dag, const TDAGColors colors)
{
const uint2 pixel = make_uint2(
blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (pixel.x >= imageWidth || pixel.y >= imageHeight)
return; // outside
const auto setColorImpl = [&](uint32 color)
{
surf2Dwrite(color, traceParams.colorsSurface, (int)sizeof(uint32) * pixel.x, pixel.y, cudaBoundaryModeClamp);
};
const Path path = Path::load(pixel.x, pixel.y, traceParams.pathsSurface);
if (path.is_null())
{
setColorImpl(ColorUtils::float3_to_rgb888(make_float3(187, 242, 250) / 255.f));
return;
}
const float toolStrength = traceParams.toolInfo.strength(path);
const auto setColor = [&](uint32 color)
{
#if TOOL_OVERLAY
if (toolStrength > 0)
{
color = ColorUtils::float3_to_rgb888(lerp(ColorUtils::rgb888_to_float3(color), make_float3(1, 0, 0), clamp(100 * toolStrength, 0.f, .5f)));
}
#endif
setColorImpl(color);
};
const auto invalidColor = [&]()
{
uint32 b = (path.path.x ^ path.path.y ^ path.path.z) & 0x1;
setColor(ColorUtils::float3_to_rgb888(make_float3(1, b, 1.f - b)));
};
uint64 nof_leaves = 0;
uint32 debugColorsIndex = 0;
uint32 colorNodeIndex = 0;
typename TDAGColors::ColorLeaf colorLeaf = colors.get_default_leaf();
uint32 level = 0;
uint32 nodeIndex = dag.get_first_node_index();
while (level < dag.leaf_level())
{
level++;
// Find the current childmask and which subnode we are in
const uint32 node = dag.get_node(level - 1, nodeIndex);
const uint8 childMask = Utils::child_mask(node);
const uint8 child = path.child_index(level, dag.levels);
// Make sure the node actually exists
if (!(childMask & (1 << child)))
{
setColor(0xFF00FF);
return;
}
ASSUME(level > 0);
if (level - 1 < colors.get_color_tree_levels())
{
colorNodeIndex = colors.get_child_index(level - 1, colorNodeIndex, child);
if (level == colors.get_color_tree_levels())
{
check(nof_leaves == 0);
colorLeaf = colors.get_leaf(colorNodeIndex);
}
else
{
// TODO nicer interface
if (!colorNodeIndex)
{
invalidColor();
return;
}
}
}
// Debug
if (traceParams.debugColors == EDebugColors::Index ||
traceParams.debugColors == EDebugColors::Position ||
traceParams.debugColors == EDebugColors::ColorTree)
{
if (traceParams.debugColors == EDebugColors::Index &&
traceParams.debugColorsIndexLevel == level - 1)
{
debugColorsIndex = nodeIndex;
}
if (level == dag.leaf_level())
{
if (traceParams.debugColorsIndexLevel == dag.leaf_level())
{
check(debugColorsIndex == 0);
const uint32 childIndex = dag.get_child_index(level - 1, nodeIndex, childMask, child);
debugColorsIndex = childIndex;
}
if (traceParams.debugColors == EDebugColors::Index)
{
setColor(Utils::murmurhash32(debugColorsIndex));
}
else if (traceParams.debugColors == EDebugColors::Position)
{
constexpr uint32 checkerSize = 0x7FF;
float color = ((path.path.x ^ path.path.y ^ path.path.z) & checkerSize) / float(checkerSize);
color = (color + 0.5) / 2;
setColor(ColorUtils::float3_to_rgb888(Utils::has_flag(nodeIndex) ? make_float3(color, 0, 0) : make_float3(color)));
}
else
{
check(traceParams.debugColors == EDebugColors::ColorTree);
const uint32 offset = dag.levels - colors.get_color_tree_levels();
const float color = ((path.path.x >> offset) ^ (path.path.y >> offset) ^ (path.path.z >> offset)) & 0x1;
setColor(ColorUtils::float3_to_rgb888(make_float3(color)));
}
return;
}
else
{
nodeIndex = dag.get_child_index(level - 1, nodeIndex, childMask, child);
continue;
}
}
//////////////////////////////////////////////////////////////////////////
// Find out how many leafs are in the children preceding this
//////////////////////////////////////////////////////////////////////////
// If at final level, just count nof children preceding and exit
if (level == dag.leaf_level())
{
for (uint8 childBeforeChild = 0; childBeforeChild < child; ++childBeforeChild)
{
if (childMask & (1u << childBeforeChild))
{
const uint32 childIndex = dag.get_child_index(level - 1, nodeIndex, childMask, childBeforeChild);
const Leaf leaf = dag.get_leaf(childIndex);
nof_leaves += Utils::popcll(leaf.to_64());
}
}
const uint32 childIndex = dag.get_child_index(level - 1, nodeIndex, childMask, child);
const Leaf leaf = dag.get_leaf(childIndex);
const uint8 leafBitIndex =
(((path.path.x & 0x1) == 0) ? 0 : 4) |
(((path.path.y & 0x1) == 0) ? 0 : 2) |
(((path.path.z & 0x1) == 0) ? 0 : 1) |
(((path.path.x & 0x2) == 0) ? 0 : 32) |
(((path.path.y & 0x2) == 0) ? 0 : 16) |
(((path.path.z & 0x2) == 0) ? 0 : 8);
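        // NOTE: the 64-bit leaf packs the last two levels (4x4x4 voxels). The
        // second-lowest path bits (x&2 -> 32, y&2 -> 16, z&2 -> 8) select the
        // 2x2x2 sub-block inside the leaf, and the lowest bits (x&1 -> 4,
        // y&1 -> 2, z&1 -> 1) select the voxel within that sub-block, so the
        // mask below counts exactly the set voxels that precede this one.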
nof_leaves += Utils::popcll(leaf.to_64() & ((uint64(1) << leafBitIndex) - 1));
break;
}
else
{
ASSUME(level > 0);
if (level > colors.get_color_tree_levels())
{
// Otherwise, fetch the next node (and accumulate leaves we pass by)
for (uint8 childBeforeChild = 0; childBeforeChild < child; ++childBeforeChild)
{
if (childMask & (1u << childBeforeChild))
{
const uint32 childIndex = dag.get_child_index(level - 1, nodeIndex, childMask, childBeforeChild);
const uint32 childNode = dag.get_node(level, childIndex);
nof_leaves += colors.get_leaves_count(level, childNode);
}
}
}
nodeIndex = dag.get_child_index(level - 1, nodeIndex, childMask, child);
}
}
if (!colorLeaf.is_valid() || !colorLeaf.is_valid_index(nof_leaves))
{
invalidColor();
return;
}
auto compressedColor = colorLeaf.get_color(nof_leaves);
uint32 color =
traceParams.debugColors == EDebugColors::ColorBits
? compressedColor.get_debug_hash()
: ColorUtils::float3_to_rgb888(
traceParams.debugColors == EDebugColors::MinColor
? compressedColor.get_min_color()
: traceParams.debugColors == EDebugColors::MaxColor
? compressedColor.get_max_color()
: traceParams.debugColors == EDebugColors::Weight
? make_float3(compressedColor.get_weight())
: compressedColor.get_color());
setColor(color);
}
template<typename TDAG>
inline __device__ bool intersect_ray_node_out_of_order(const TDAG& dag, const float3 rayOrigin, const float3 rayDirection)
{
const float3 rayDirectionInverse = make_float3(make_double3(1. / rayDirection.x, 1. / rayDirection.y, 1. / rayDirection.z));
// State
uint32 level = 0;
Path path(0, 0, 0);
StackEntry stack[MAX_LEVELS];
StackEntry cache;
Leaf cachedLeaf; // needed to iterate on the last few levels
cache.index = dag.get_first_node_index();
cache.childMask = Utils::child_mask(dag.get_node(0, cache.index));
cache.visitMask = cache.childMask & compute_intersection_mask<true>(0, path, dag, rayOrigin, rayDirection, rayDirectionInverse);
// Traverse DAG
for (;;)
{
// Ascend if there are no children left.
{
uint32 newLevel = level;
while (newLevel > 0 && !cache.visitMask)
{
newLevel--;
cache = stack[newLevel];
}
if (newLevel == 0 && !cache.visitMask)
{
path = Path(0, 0, 0);
break;
}
path.ascend(level - newLevel);
level = newLevel;
}
// Find next child in order by the current ray's direction
const uint8 nextChild = 31 - __clz(cache.visitMask);
// Mark it as handled
cache.visitMask &= ~(1u << nextChild);
// Intersect that child with the ray
{
path.descend(nextChild);
stack[level] = cache;
level++;
// If we're at the final level, we have intersected a single voxel.
if (level == dag.levels)
{
return true;
}
// Are we in an internal node?
if (level < dag.leaf_level())
{
cache.index = dag.get_child_index(level - 1, cache.index, cache.childMask, nextChild);
cache.childMask = Utils::child_mask(dag.get_node(level, cache.index));
cache.visitMask = cache.childMask & compute_intersection_mask<false>(level, path, dag, rayOrigin, rayDirection, rayDirectionInverse);
}
else
{
/* The second-to-last and last levels are different: the data
* of these two levels (2^3 voxels) are packed densely into a
* single 64-bit word.
*/
uint8 childMask;
if (level == dag.leaf_level())
{
const uint32 addr = dag.get_child_index(level - 1, cache.index, cache.childMask, nextChild);
cachedLeaf = dag.get_leaf(addr);
childMask = cachedLeaf.get_first_child_mask();
}
else
{
childMask = cachedLeaf.get_second_child_mask(nextChild);
}
// No need to set the index for bottom nodes
cache.childMask = childMask;
cache.visitMask = cache.childMask & compute_intersection_mask<false>(level, path, dag, rayOrigin, rayDirection, rayDirectionInverse);
}
}
}
return false;
}
// Directed towards the sun
HOST_DEVICE float3 sun_direction()
{
return normalize(make_float3(0.3f, 1.f, 0.5f));
}
HOST_DEVICE float3 applyFog(float3 rgb,       // original color of the pixel
                            double distance,  // camera-to-point distance
                            double3 rayDir,   // camera-to-point direction
                            double3 rayOri,   // camera position
                            float fogDensity) // fog density (scaled by 1e-5 below)
{
#if 0
constexpr float fogDensity = 0.0001f;
constexpr float c = 1.f;
constexpr float heightOffset = 20000.f;
constexpr float heightScale = 1.f;
double fogAmount = c * exp((heightOffset - rayOri.y * heightScale) * fogDensity) * (1.0 - exp(-distance * rayDir.y * fogDensity)) / rayDir.y;
#else
fogDensity *= 0.00001f;
double fogAmount = 1.0 - exp(-distance * fogDensity);
#endif
double sunAmount = 1.01f * max(dot(rayDir, make_double3(sun_direction())), 0.0);
float3 fogColor = lerp(make_float3(187, 242, 250) / 255.f, // blue
make_float3(1.0f), // white
float(pow(sunAmount, 30.0)));
return lerp(rgb, fogColor, clamp(float(fogAmount), 0.f, 1.f));
}
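// NOTE: the #else branch used here is plain exponential distance fog,
// fogAmount = 1 - exp(-distance * k) with k = fogDensity * 1e-5, so at a
// distance of 1/k roughly 63% (1 - 1/e) of the fog color is blended in.
// The pow(sunAmount, 30) term only tints the fog from blue towards white
// when looking near the sun direction.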
HOST_DEVICE double3 ray_box_intersection(double3 orig, double3 dir, double3 box_min, double3 box_max)
{
double3 tmin = (box_min - orig) / dir;
double3 tmax = (box_max - orig) / dir;
double3 real_min = min(tmin, tmax);
double3 real_max = max(tmin, tmax);
// double minmax = min(min(real_max.x, real_max.y), real_max.z);
double maxmin = max(max(real_min.x, real_min.y), real_min.z);
// checkf(minmax >= maxmin, "%f > %f", minmax, maxmin);
return orig + dir * maxmin;
}
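// NOTE: this is the standard slab test. real_min/real_max hold the per-axis
// entry/exit parameters, so maxmin (the largest entry value) is the t at which
// the ray first enters the box and orig + dir * maxmin is that entry point;
// the commented-out minmax would be the corresponding exit parameter.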
template<typename TDAG>
__global__ void Tracer::trace_shadows(const TraceShadowsParams params, const TDAG dag)
{
const uint2 pixel = make_uint2(
blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (pixel.x >= imageWidth || pixel.y >= imageHeight)
return; // outside
const auto setColorImpl = [&](float3 color)
{
const uint32 finalColor = ColorUtils::float3_to_rgb888(color);
surf2Dwrite(finalColor, params.colorsSurface, (int)sizeof(uint32) * pixel.x, pixel.y, cudaBoundaryModeClamp);
};
const auto setColor = [&](float light, double distance, double3 direction)
{
const uint32 colorInt = surf2Dread<uint32>(params.colorsSurface, pixel.x * sizeof(uint32), pixel.y);
float3 color = ColorUtils::rgb888_to_float3(colorInt);
color = color * clamp(0.5f + light, 0.f, 1.f);
color = applyFog(
color,
distance,
direction,
params.cameraPosition,
params.fogDensity);
setColorImpl(color);
};
const float3 rayOrigin = make_float3(Path::load(pixel.x, pixel.y, params.pathsSurface).path);
const double3 cameraRayDirection = normalize(params.rayMin + pixel.x * params.rayDDx + (imageHeight - 1 - pixel.y) * params.rayDDy - params.cameraPosition);
#if EXACT_SHADOWS || PER_VOXEL_FACE_SHADING
const double3 rayOriginDouble = make_double3(rayOrigin);
const double3 hitPosition = ray_box_intersection(
params.cameraPosition,
cameraRayDirection,
rayOriginDouble,
rayOriginDouble + 1);
#endif
#if EXACT_SHADOWS
const float3 shadowStart = make_float3(hitPosition);
#else
const float3 shadowStart = rayOrigin;
#endif
#if 0
setColorImpl(make_float3(clamp_vector(normal, 0, 1)));
return;
#endif
if (length(rayOrigin) == 0.0f)
{
setColor(1, 1e9, cameraRayDirection);
return; // Discard cleared or light-backfacing fragments
}
const float3 direction = sun_direction();
const bool isShadowed = intersect_ray_node_out_of_order(dag, shadowStart + params.shadowBias * direction, direction);
const double3 v = make_double3(rayOrigin) - params.cameraPosition;
const double distance = length(v);
const double3 nv = v / distance;
if (isShadowed)
{
setColor(0, distance, nv);
}
else
{
#if PER_VOXEL_FACE_SHADING
const double3 voxelOriginToHitPosition = normalize(hitPosition - (rayOriginDouble + 0.5));
const auto truncate_signed = [](double3 d) { return make_double3(int32(d.x), int32(d.y), int32(d.z)); };
const double3 normal = truncate_signed(voxelOriginToHitPosition / max(abs(voxelOriginToHitPosition)));
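        // NOTE: dividing by the largest absolute component maps the dominant axis
        // of voxelOriginToHitPosition to +/-1 while the other components stay in
        // (-1, 1); truncating towards zero then discards them, leaving the
        // axis-aligned normal of the voxel face that was hit.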
setColor(max(0.f, dot(make_float3(normal), sun_direction())), distance, nv);
#else
setColor(1, distance, nv);
#endif
}
#if 0 // AO code copy-pasted from Erik's impl, doesn't compile at all
constexpr int sqrtNofSamples = 8;
float avgSum = 0;
for (int y = 0; y < sqrtNofSamples; y++)
{
for (int x = 0; x < sqrtNofSamples; x++)
{
int2 coord = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
float3 normal = make_float3(tex2D(normalTexture, float(coord.x), float(coord.y)));
float3 tangent = normalize3(perp3(normal));
float3 bitangent = cross(normal, tangent);
//int2 randomCoord = make_int2((coord.x * sqrtNofSamples + x + randomSeed.x)%RAND_SIZE, (coord.y * sqrtNofSamples + y + randomSeed.y)%RAND_SIZE);
int2 randomCoord = make_int2((coord.x * sqrtNofSamples + x + randomSeed.x) & RAND_BITMASK, (coord.y * sqrtNofSamples + y + randomSeed.y) & RAND_BITMASK);
float2 randomSample = tex2D(randomTexture, randomCoord.x, randomCoord.y);
float randomLength = tex2D(randomTexture, randomCoord.y, randomCoord.x).x;
float2 dxdy = make_float2(1.0f / float(sqrtNofSamples), 1.0f / float(sqrtNofSamples));
float3 sample = cosineSampleHemisphere(make_float2(x * dxdy.x, y * dxdy.y) + (1.0 / float(sqrtNofSamples)) * randomSample);
float3 ray_d = normalize3(sample.x * tangent + sample.y * bitangent + sample.z * normal);
avgSum += intersectRayNode_outOfOrder<maxLevels>(ray_o, ray_d, ray_tmax * randomLength, rootCenter, rootRadius, coneOpening) ? 0.0f : 1.0f;
}
}
avgSum /= float(sqrtNofSamples * sqrtNofSamples);
#endif
}
template __global__ void Tracer::trace_paths<BasicDAG>(TracePathsParams, BasicDAG);
template __global__ void Tracer::trace_paths<HashDAG >(TracePathsParams, HashDAG);
template __global__ void Tracer::trace_shadows<BasicDAG>(TraceShadowsParams, BasicDAG);
template __global__ void Tracer::trace_shadows<HashDAG >(TraceShadowsParams, HashDAG);
#define COLORS_IMPL(Dag, Colors)\
template __global__ void Tracer::trace_colors<Dag, Colors>(TraceColorsParams, Dag, Colors);
COLORS_IMPL(BasicDAG, BasicDAGUncompressedColors)
COLORS_IMPL(BasicDAG, BasicDAGCompressedColors)
COLORS_IMPL(BasicDAG, BasicDAGColorErrors)
COLORS_IMPL(HashDAG, HashDAGColors)
/******************************************************************************
 * Test of BlockMergeSort utilities
 ******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <limits>
#include <typeinfo>
#include <memory>
#include <cub/util_allocator.cuh>
#include <cub/block/block_merge_sort.cuh>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <thrust/shuffle.h>
#include <thrust/sort.h>
#include "test_util.h"
using namespace cub;
struct CustomType
{
std::uint8_t key;
std::uint64_t count;
__device__ __host__ CustomType()
: key(0)
, count(0)
{}
__device__ __host__ CustomType(std::uint64_t value)
: key(static_cast<std::uint8_t>(value))
, count(value)
{}
__device__ __host__ void operator=(std::uint64_t value)
{
key = static_cast<std::uint8_t>(value);
count = value;
}
};
struct CustomLess
{
template <typename DataType>
__device__ bool operator()(DataType &lhs, DataType &rhs)
{
return lhs < rhs;
}
__device__ bool operator()(CustomType &lhs, CustomType &rhs)
{
return lhs.key < rhs.key;
}
};
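// NOTE: the dedicated CustomType overload compares only the 8-bit key, which
// collides for many distinct counts. TestStability below relies on this:
// after a StableSort by key, elements with equal keys must still appear in
// increasing count order, which CountComparator then verifies.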
template <
typename DataType,
unsigned int ThreadsInBlock,
unsigned int ItemsPerThread,
bool Stable = false>
__global__ void BlockMergeSortTestKernel(DataType *data, unsigned int valid_items)
{
using BlockMergeSort =
cub::BlockMergeSort<DataType, ThreadsInBlock, ItemsPerThread>;
__shared__ typename BlockMergeSort::TempStorage temp_storage_shuffle;
DataType thread_data[ItemsPerThread];
const unsigned int thread_offset = threadIdx.x * ItemsPerThread;
for (unsigned int item = 0; item < ItemsPerThread; item++)
{
const unsigned int idx = thread_offset + item;
thread_data[item] = idx < valid_items ? data[idx] : DataType();
}
__syncthreads();
// Tests below use sequence to fill the data.
// Therefore the following value should be greater than any that
// is present in the input data.
const DataType oob_default =
static_cast<std::uint64_t>(ThreadsInBlock * ItemsPerThread + 1);
if (Stable)
{
if (valid_items == ThreadsInBlock * ItemsPerThread)
{
BlockMergeSort(temp_storage_shuffle).StableSort(
thread_data,
CustomLess());
}
else
{
BlockMergeSort(temp_storage_shuffle).StableSort(
thread_data,
CustomLess(),
valid_items,
oob_default);
}
}
else
{
if (valid_items == ThreadsInBlock * ItemsPerThread)
{
BlockMergeSort(temp_storage_shuffle).Sort(
thread_data,
CustomLess());
}
else
{
BlockMergeSort(temp_storage_shuffle).Sort(
thread_data,
CustomLess(),
valid_items,
oob_default);
}
}
for (unsigned int item = 0; item < ItemsPerThread; item++)
{
const unsigned int idx = thread_offset + item;
if (idx >= valid_items)
break;
data[idx] = thread_data[item];
}
}
template <
typename KeyType,
typename ValueType,
unsigned int ThreadsInBlock,
unsigned int ItemsPerThread,
bool Stable = false>
__global__ void BlockMergeSortTestKernel(KeyType *keys,
ValueType *values,
unsigned int valid_items)
{
using BlockMergeSort =
cub::BlockMergeSort<KeyType, ThreadsInBlock, ItemsPerThread, ValueType>;
__shared__ typename BlockMergeSort::TempStorage temp_storage_shuffle;
KeyType thread_keys[ItemsPerThread];
ValueType thread_values[ItemsPerThread];
const unsigned int thread_offset = threadIdx.x * ItemsPerThread;
for (unsigned int item = 0; item < ItemsPerThread; item++)
{
const unsigned int idx = thread_offset + item;
thread_keys[item] = idx < valid_items ? keys[idx] : KeyType();
thread_values[item] = idx < valid_items ? values[idx] : ValueType();
}
__syncthreads();
// Tests below use sequence to fill the data.
// Therefore the following value should be greater than any that
// is present in the input data.
const KeyType oob_default = ThreadsInBlock * ItemsPerThread + 1;
if (Stable)
{
if (valid_items == ThreadsInBlock * ItemsPerThread)
{
BlockMergeSort(temp_storage_shuffle).StableSort(
thread_keys,
thread_values,
CustomLess());
}
else
{
BlockMergeSort(temp_storage_shuffle).StableSort(
thread_keys,
thread_values,
CustomLess(),
valid_items,
oob_default);
}
}
else
{
if (valid_items == ThreadsInBlock * ItemsPerThread)
{
BlockMergeSort(temp_storage_shuffle).Sort(
thread_keys,
thread_values,
CustomLess());
}
else
{
BlockMergeSort(temp_storage_shuffle).Sort(
thread_keys,
thread_values,
CustomLess(),
valid_items,
oob_default);
}
}
for (unsigned int item = 0; item < ItemsPerThread; item++)
{
const unsigned int idx = thread_offset + item;
if (idx >= valid_items)
break;
keys[idx] = thread_keys[item];
values[idx] = thread_values[item];
}
}
template<
typename DataType,
unsigned int ItemsPerThread,
unsigned int ThreadsInBlock,
bool Stable = false>
void BlockMergeSortTest(DataType *data, unsigned int valid_items)
{
BlockMergeSortTestKernel<DataType, ThreadsInBlock, ItemsPerThread, Stable>
<<<1, ThreadsInBlock>>>(data, valid_items);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
}
template<
typename KeyType,
typename ValueType,
unsigned int ItemsPerThread,
unsigned int ThreadsInBlock>
void BlockMergeSortTest(KeyType *keys, ValueType *values, unsigned int valid_items)
{
BlockMergeSortTestKernel<KeyType, ValueType, ThreadsInBlock, ItemsPerThread>
<<<1, ThreadsInBlock>>>(keys, values, valid_items);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
}
template <typename DataType>
bool CheckResult(int num_items,
thrust::device_vector<DataType> &d_data,
thrust::host_vector<DataType> &h_data)
{
thrust::copy_n(d_data.begin(), num_items, h_data.begin());
for (int i = 0; i < num_items; i++)
{
if (h_data[i] != i)
{
return false;
}
}
return true;
}
template <
typename DataType,
unsigned int ItemsPerThread,
unsigned int ThreadsInBlock>
void Test(unsigned int num_items,
thrust::default_random_engine &rng,
thrust::device_vector<DataType> &d_data,
thrust::host_vector<DataType> &h_data)
{
thrust::sequence(d_data.begin(), d_data.end());
thrust::shuffle(d_data.begin(), d_data.end(), rng);
BlockMergeSortTest<DataType, ItemsPerThread, ThreadsInBlock>(
thrust::raw_pointer_cast(d_data.data()), num_items);
AssertTrue(CheckResult(num_items, d_data, h_data));
}
template <
typename KeyType,
typename ValueType,
unsigned int ItemsPerThread,
unsigned int ThreadsInBlock>
void Test(unsigned int num_items,
thrust::default_random_engine &rng,
thrust::device_vector<KeyType> &d_keys,
thrust::device_vector<ValueType> &d_values,
thrust::host_vector<ValueType> &h_data)
{
thrust::sequence(d_keys.begin(), d_keys.end());
thrust::shuffle(d_keys.begin(), d_keys.end(), rng);
thrust::copy_n(d_keys.begin(), num_items, d_values.begin());
BlockMergeSortTest<KeyType, ValueType, ItemsPerThread, ThreadsInBlock>(
thrust::raw_pointer_cast(d_keys.data()),
thrust::raw_pointer_cast(d_values.data()),
num_items);
AssertTrue(CheckResult(num_items, d_values, h_data));
}
template <
typename KeyType,
typename ValueType,
unsigned int ItemsPerThread,
unsigned int ThreadsInBlock>
void Test(thrust::default_random_engine &rng)
{
for (unsigned int num_items = ItemsPerThread * ThreadsInBlock;
num_items > 1;
num_items /= 2)
{
thrust::device_vector<KeyType> d_keys(num_items);
thrust::device_vector<ValueType> d_values(num_items);
thrust::host_vector<KeyType> h_keys(num_items);
thrust::host_vector<ValueType> h_values(num_items);
Test<KeyType, ItemsPerThread, ThreadsInBlock>(num_items,
rng,
d_keys,
h_keys);
Test<KeyType, ValueType, ItemsPerThread, ThreadsInBlock>(num_items,
rng,
d_keys,
d_values,
h_values);
}
}
template <unsigned int ItemsPerThread, unsigned int ThreadsPerBlock>
void Test(thrust::default_random_engine &rng)
{
Test<std::int32_t, std::int32_t, ItemsPerThread, ThreadsPerBlock>(rng);
Test<std::int64_t, std::int64_t, ItemsPerThread, ThreadsPerBlock>(rng);
// Mixed types
Test<std::int16_t, std::int64_t, ItemsPerThread, ThreadsPerBlock>(rng);
Test<std::int32_t, std::int64_t, ItemsPerThread, ThreadsPerBlock>(rng);
}
template <unsigned int ItemsPerThread>
void Test(thrust::default_random_engine &rng)
{
Test<ItemsPerThread, 32>(rng);
Test<ItemsPerThread, 256>(rng);
}
struct CountToType
{
__device__ __host__ CustomType operator()(std::uint64_t val)
{
return { val };
}
};
struct CountComparator
{
__device__ __host__ bool operator()(const CustomType &lhs, const CustomType &rhs)
{
if (lhs.key == rhs.key)
return lhs.count < rhs.count;
return lhs.key < rhs.key;
}
};
void TestStability()
{
constexpr unsigned int items_per_thread = 10;
constexpr unsigned int threads_per_block = 128;
constexpr unsigned int elements = items_per_thread * threads_per_block;
constexpr bool stable = true;
thrust::device_vector<CustomType> d_keys(elements);
thrust::device_vector<std::uint64_t> d_counts(elements);
thrust::sequence(d_counts.begin(), d_counts.end());
thrust::transform(d_counts.begin(), d_counts.end(), d_keys.begin(), CountToType{});
// Sort keys
BlockMergeSortTest<CustomType, items_per_thread, threads_per_block, stable>(
thrust::raw_pointer_cast(d_keys.data()),
elements);
// Check counts
AssertTrue(thrust::is_sorted(d_keys.begin(), d_keys.end(), CountComparator{}));
}
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
// Initialize device
CubDebugExit(args.DeviceInit());
thrust::default_random_engine rng;
Test<1>(rng);
Test<2>(rng);
Test<10>(rng);
Test<15>(rng);
Test<std::int32_t, std::int32_t, 1, 512>(rng);
Test<std::int64_t, std::int64_t, 2, 512>(rng);
TestStability();
return 0;
}
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("TIGRE:Ax:interpolated_parallel",cudaGetErrorString(__err));\
} \
} while (0)
// Declare the texture reference.
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
#define MAXTREADS 1024
#define PROJ_PER_BLOCK 8
#define PIXEL_SIZE_BLOCK 8
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTextureParallelInterp(float* image,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,cudaStream_t* stream);
__constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device
__constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device
__global__ void kernelPixelDetector_parallel_interpolated( Geometry geo,
float* detector,
const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex)
{
// Point3D source ,
// Point3D deltaU,
// Point3D deltaV,
// Point3D uvOrigin,
// float DSO,
// float maxdist){
unsigned long u = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long v = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long projNumber=threadIdx.z;
if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK)
return;
int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array
#if IS_FOR_MATLAB_TIGRE
size_t idx = (size_t)(u * geo.nDetecV + v)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ;
#else
size_t idx = (size_t)(v * geo.nDetecU + u)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ;
#endif
if(indAlpha>=totalNoOfProjections)
return;
    Point3D uvOrigin = projParamsArrayDev[4*projNumber];  // 4*projNumber because we have 4 Point3D values per projection
Point3D deltaU = projParamsArrayDev[4*projNumber+1];
Point3D deltaV = projParamsArrayDev[4*projNumber+2];
Point3D source = projParamsArrayDev[4*projNumber+3];
float DSO = projFloatsArrayDev[2*projNumber+0];
float maxdist = projFloatsArrayDev[2*projNumber+1];
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-v-1;
int pixelU = u;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
Point3D S;
S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x);
S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y);
S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
double length=sqrtf((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z));
    //now length is the (integer) number of samples required on this line
length=ceilf(length/geo.accuracy);//Divide the directional vector by an integer
vectX=(P.x -S.x)/(length);
vectY=(P.y -S.y)/(length);
vectZ=(P.z -S.z)/(length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// limit the amount of mem access after the cube, but before the detector.
if ((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy < length)
length=ceilf((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy);
//Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel")
for (i=floorf(maxdist/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+S.x;
ty=vectY*i+S.y;
tz=vectZ*i+S.z;
sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time.
}
float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+
(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
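// NOTE: the kernel above approximates the line integral of the volume along each
// detector ray as a Riemann sum: it steps from S towards P in unit multiples of the
// rescaled direction vector, accumulates trilinearly interpolated samples via tex3D,
// and multiplies the sum by deltalength, the physical length of a single step.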
int interpolation_projection_parallel(float * img, Geometry geo, float** result,float const * const angles,int nangles, const GpuIds& gpuids){
size_t num_bytes = geo.nDetecU*geo.nDetecV *PROJ_PER_BLOCK* sizeof(float);
float** dProjection=(float **)malloc(2*sizeof(float *));
for (int i = 0; i < 2; ++i){
cudaMalloc((void**)&dProjection[i], num_bytes);
cudaCheckErrors("cudaMalloc projections fail");
}
// allocate streams for memory and compute
int nStreams=2;
    cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (int i = 0; i < 2; ++i){
cudaStreamCreate(&stream[i]);
}
// Texture object variables
cudaTextureObject_t *texImg = 0;
cudaArray **d_cuArrTex = 0;
texImg =(cudaTextureObject_t*)malloc(1*sizeof(cudaTextureObject_t));
d_cuArrTex =(cudaArray**)malloc(1*sizeof(cudaArray*));
CreateTextureParallelInterp(img,geo,&d_cuArrTex[0], &texImg[0],stream);
cudaCheckErrors("Texture allocation fail");
//Done! Image put into texture memory.
Point3D source, deltaU, deltaV, uvOrigin;
Point3D* projParamsArrayHost;
cudaMallocHost((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D));
float* projFloatsArrayHost;
cudaMallocHost((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float));
// 16x16 gave the best performance empirically
    // Funnily enough, that also makes it compatible with most GPUs.
int divU,divV,divangle;
divU=PIXEL_SIZE_BLOCK;
divV=PIXEL_SIZE_BLOCK;
dim3 numBlocks((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1);
dim3 threadsPerBlock(divU,divV,PROJ_PER_BLOCK);
unsigned int proj_global;
    unsigned int noOfKernelCalls = (nangles+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nangles is not divisible by PROJ_PER_BLOCK
unsigned int i;
float maxdist;
for ( i=0; i<noOfKernelCalls; i++){
for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
proj_global=i*PROJ_PER_BLOCK+j;
if (proj_global>=nangles)
break;
geo.alpha=angles[proj_global*3];
geo.theta=angles[proj_global*3+1];
geo.psi =angles[proj_global*3+2];
            //precompute distances for faster execution
maxdist=maxdistanceCuboid(geo,proj_global);
//Precompute per angle constant stuff for speed
computeDeltas_parallel(geo,geo.alpha,proj_global, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
            projParamsArrayHost[4*j]=uvOrigin;   // 4*j because we have 4 Point3D values per projection
projParamsArrayHost[4*j+1]=deltaU;
projParamsArrayHost[4*j+2]=deltaV;
projParamsArrayHost[4*j+3]=source;
projFloatsArrayHost[2*j]=geo.DSO[proj_global];
projFloatsArrayHost[2*j+1]=floor(maxdist);
}
cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[0]);
cudaMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[0]);
cudaStreamSynchronize(stream[0]);
kernelPixelDetector_parallel_interpolated<<<numBlocks,threadsPerBlock,0,stream[0]>>>(geo,dProjection[(int)i%2==0],i,nangles,texImg[0]);
// copy result to host
if (i>0)
cudaMemcpyAsync(result[i*PROJ_PER_BLOCK-PROJ_PER_BLOCK],dProjection[(int)i%2!=0], num_bytes, cudaMemcpyDeviceToHost,stream[1]);
}
cudaDeviceSynchronize();
int lastangles=nangles-(i-1)*PROJ_PER_BLOCK;
cudaMemcpyAsync(result[(i-1)*PROJ_PER_BLOCK],dProjection[(int)(i-1)%2==0], lastangles*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[1]);
cudaDestroyTextureObject(texImg[0]);
cudaFreeArray(d_cuArrTex[0]);
free(texImg); texImg = 0;
free(d_cuArrTex); d_cuArrTex = 0;
cudaCheckErrors("Unbind fail");
cudaFree(dProjection[0]);
cudaFree(dProjection[1]);
free(dProjection);
cudaFreeHost(projParamsArrayHost);
cudaFreeHost(projFloatsArrayHost);
cudaCheckErrors("cudaFree d_imagedata fail");
for (int i = 0; i < 2; ++i){
cudaStreamDestroy(stream[i]);
}
// cudaDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * used to compute the locations of the x-rays. While it seems verbose and overly optimized,
 * it saves about 30% of each of the kernel calls. That's something!
 **/
void computeDeltas_parallel(Geometry geo, float alpha,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);
S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
    // Geometric transformations:
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
    //Now let's translate the points to where they should be:
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
S.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&S);
    //Now let's translate the points to where they should be:
S.x=S.x+geo.DSO[i];
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
    //S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
    //2: Offset image (instead of offsetting the image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
    //5. Apply COR. Wherever everything was, now it's offset by a bit.
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
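// NOTE: the kernel reconstructs each detector pixel from these outputs as
//     P = uvOrigin + pixelU * deltaU + pixelV * deltaV
// and, since the beam is parallel, shifts the source by the same offsets,
//     S = source + pixelU * deltaU + pixelV * deltaV,
// so only four Point3D values (plus DSO and maxdist) are needed per projection.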
void CreateTextureParallelInterp(float* image,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,cudaStream_t* stream){ //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[0], &channelDesc, extent);
cudaMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)image, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[0];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
    cudaMemcpy3DAsync(&copyParams, stream[1]);
//Array creation End
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArrTex[0];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texImage[0], &texRes, &texDescr, NULL);
}
#ifdef __cplusplus
extern "C" {
#endif
unsigned char computeSmoothNormals[] = {
0x50,0xed,0x55,0xba,0x01,0x00,0x10,0x00,0x38,0x28,0x00,0x00,0x00,0x00,0x00,0x00,
0x02,0x00,0x01,0x01,0x58,0x00,0x00,0x00,0x28,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x01,0x00,0x0b,0x00,0x00,0x00,
0x40,0x00,0x00,0x00,0x15,0x00,0x00,0x00,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x52,0x65,0x6e,0x64,0x65,0x72,0x43,0x6c,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,
0x6c,0x73,0x2e,0x63,0x75,0x00,0x00,0x00,0x7f,0x45,0x4c,0x46,0x01,0x01,0x01,0x33,
0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0xbe,0x00,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xc8,0x0b,0x00,0x00,0x60,0x0a,0x00,0x00,0x0b,0x01,0x0b,0x00,
0x34,0x00,0x20,0x00,0x03,0x00,0x28,0x00,0x09,0x00,0x01,0x00,0x00,0x2e,0x73,0x68,
0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,
0x73,0x79,0x6d,0x74,0x61,0x62,0x00,0x2e,0x74,0x65,0x78,0x74,0x2e,0x63,0x6f,0x6d,
0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,
0x73,0x00,0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x2e,0x63,0x6f,0x6d,0x70,0x75,
0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,
0x2e,0x6e,0x76,0x2e,0x73,0x68,0x61,0x72,0x65,0x64,0x2e,0x63,0x6f,0x6d,0x70,0x75,
0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,
0x2e,0x6e,0x76,0x2e,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x31,0x2e,0x63,0x6f,
0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,
0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x6c,0x6f,0x63,0x61,0x6c,0x2e,0x63,0x6f,0x6d,
0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,
0x73,0x00,0x00,0x2e,0x73,0x68,0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,0x73,0x74,
0x72,0x74,0x61,0x62,0x00,0x2e,0x73,0x79,0x6d,0x74,0x61,0x62,0x00,0x63,0x6f,0x6d,
0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,
0x73,0x00,0x2e,0x74,0x65,0x78,0x74,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,
0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,
0x2e,0x69,0x6e,0x66,0x6f,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,
0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x73,
0x68,0x61,0x72,0x65,0x64,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,
0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x63,
0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x31,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,
0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x63,0x6f,
0x6e,0x73,0x74,0x00,0x2e,0x6e,0x76,0x2e,0x6c,0x6f,0x63,0x61,0x6c,0x2e,0x63,0x6f,
0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,
0x6c,0x73,0x00,0x24,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,
0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x24,0x6c,0x6f,0x63,0x61,0x6c,0x00,0x5f,
0x5f,0x63,0x75,0x64,0x61,0x70,0x61,0x72,0x6d,0x5f,0x63,0x6f,0x6d,0x70,0x75,0x74,
0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x5f,0x70,
0x61,0x72,0x74,0x69,0x63,0x6c,0x65,0x73,0x00,0x5f,0x5f,0x63,0x75,0x64,0x61,0x70,
0x61,0x72,0x6d,0x5f,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,
0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x5f,0x69,0x6e,0x64,0x69,0x63,0x65,0x73,
0x00,0x5f,0x5f,0x63,0x75,0x64,0x61,0x70,0x61,0x72,0x6d,0x5f,0x63,0x6f,0x6d,0x70,
0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,
0x5f,0x76,0x65,0x72,0x74,0x69,0x63,0x65,0x73,0x00,0x5f,0x5f,0x63,0x75,0x64,0x61,
0x70,0x61,0x72,0x6d,0x5f,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,
0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x5f,0x6e,0x75,0x6d,0x54,0x72,0x69,
0x73,0x00,0x5f,0x5f,0x63,0x75,0x64,0x61,0x70,0x61,0x72,0x6d,0x5f,0x63,0x6f,0x6d,
0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,
0x73,0x5f,0x6e,0x75,0x6d,0x50,0x61,0x72,0x74,0x69,0x63,0x6c,0x65,0x73,0x00,0x24,
0x5f,0x5f,0x5f,0x5f,0x63,0x75,0x64,0x61,0x5f,0x5f,0x5f,0x63,0x75,0x64,0x61,0x5f,
0x5f,0x5f,0x54,0x32,0x31,0x36,0x5f,0x34,0x30,0x32,0x30,0x5f,0x5f,0x37,0x30,0x00,
0x24,0x5f,0x5f,0x5f,0x5f,0x63,0x75,0x64,0x61,0x5f,0x5f,0x5f,0x63,0x75,0x64,0x61,
0x5f,0x5f,0x5f,0x54,0x32,0x32,0x31,0x5f,0x35,0x32,0x33,0x32,0x5f,0x5f,0x37,0x31,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x03,0x00,0x06,0x00,0x69,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x03,0x00,0x07,0x00,0x89,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x03,0x00,0x05,0x00,0xb2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x03,0x00,0x08,0x00,0x1b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xa8,0x06,0x00,0x00,
0x12,0x10,0x06,0x00,0x03,0x18,0x14,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x04,0x00,0x10,0x00,0x00,0xf0,0x13,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x03,0x00,0x0c,0x00,0x00,0xf0,0x13,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x02,0x00,0x08,0x00,0x00,0xf0,0x13,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x01,0x00,0x04,0x00,0x00,0xf0,0x13,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xf0,0x13,0x00,0x03,0x1b,0x01,0x00,0x04,0x1e,0x04,0x00,
0x10,0x01,0x00,0x00,0x18,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x10,0x00,0x00,0x00,
0x11,0x00,0x00,0xa0,0x80,0x07,0x00,0x04,0x01,0xd0,0x04,0x30,0xd0,0x07,0x21,0x64,
0xfd,0x01,0x00,0xa0,0xc8,0x47,0x01,0x0c,0x03,0x50,0x01,0xa0,0x00,0x00,0x00,0x00,
0x03,0x50,0x01,0x10,0x00,0x11,0x00,0x00,0x05,0xd0,0x04,0x30,0x80,0x07,0x30,0xc4,
0x09,0xd0,0x03,0x30,0x80,0x07,0x30,0xc4,0x00,0xec,0x00,0x11,0x08,0x82,0x02,0x20,
0x05,0x22,0x18,0x41,0x03,0x00,0x00,0x00,0x01,0x08,0x80,0x60,0x80,0x07,0x40,0x60,
0x09,0xcc,0x00,0x20,0x80,0x87,0x20,0x04,0x15,0x80,0x0c,0x20,0x03,0x00,0x00,0x00,
0x0d,0x80,0x00,0x10,0x03,0x00,0x00,0x00,0x19,0x80,0x10,0x20,0x03,0x00,0x00,0x00,
0x1d,0x80,0x14,0x20,0x03,0x00,0x00,0x00,0x0d,0x0a,0x0e,0xd0,0x80,0x07,0xc0,0xa0,
0x01,0x00,0x00,0x20,0x80,0x47,0x00,0x04,0x0d,0x0c,0x0e,0xd0,0x80,0x07,0xc0,0xa0,
0xfd,0x01,0x02,0x30,0xd8,0x47,0x00,0x64,0x0d,0x0e,0x0e,0xd0,0x80,0x07,0xc0,0xa0,
0x03,0xb0,0x00,0x10,0x80,0x12,0x00,0x00,0x01,0x00,0x00,0xf0,0x02,0x00,0x00,0xe0,
0x03,0xfe,0x1f,0x86,0x00,0x00,0x00,0x00,0xfd,0xcf,0x04,0x30,0xd8,0xc7,0x20,0x64,
0x03,0x80,0x09,0xa0,0x00,0x00,0x00,0x00,0x03,0x80,0x09,0x10,0x80,0x12,0x00,0x00,
0x05,0xce,0x02,0x30,0x80,0x07,0x30,0xc4,0x09,0xce,0x01,0x30,0x80,0x07,0x30,0xc4,
0x00,0xea,0x00,0x11,0x04,0x82,0x02,0x20,0x15,0x22,0x06,0x41,0x03,0x00,0x00,0x00,
0x01,0x08,0x81,0x60,0x80,0x07,0x40,0x60,0x19,0xca,0x00,0x20,0x80,0x47,0x20,0x04,
0x25,0x00,0x0e,0xd0,0x80,0x07,0x40,0x80,0x05,0x80,0x02,0x20,0x03,0x00,0x00,0x00,
0x21,0x02,0x0e,0xd0,0x80,0x07,0x40,0x80,0x05,0x80,0x04,0x20,0x03,0x00,0x00,0x00,
0x1d,0x02,0x0e,0xd0,0x80,0x07,0x40,0x80,0x05,0x08,0x00,0x10,0x80,0xc7,0x00,0x44,
0x0d,0x24,0x84,0x60,0x80,0x47,0x40,0x00,0x2d,0x06,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x09,0x20,0x84,0x60,0x80,0x47,0x40,0x00,0x29,0x04,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x05,0x1c,0x10,0x60,0x03,0x00,0x00,0x00,0x39,0x02,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x31,0x86,0x04,0x20,0x03,0x00,0x00,0x00,0x31,0x18,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x35,0x84,0x04,0x20,0x03,0x00,0x00,0x00,0x35,0x1a,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x28,0x14,0x4b,0xb0,0x2c,0x96,0x0e,0xb0,0x0d,0x86,0x08,0x20,0x03,0x00,0x00,0x00,
0x0d,0x06,0x0e,0xd0,0x80,0x07,0xc0,0x80,0x09,0x84,0x08,0x20,0x03,0x00,0x00,0x00,
0x09,0x04,0x0e,0xd0,0x80,0x07,0xc0,0x80,0x39,0x82,0x04,0x20,0x03,0x00,0x00,0x00,
0x39,0x1c,0x0e,0xd0,0x80,0x07,0xc0,0x80,0x05,0x82,0x08,0x20,0x03,0x00,0x00,0x00,
0x05,0x02,0x0e,0xd0,0x80,0x07,0xc0,0x80,0x34,0x1a,0x4c,0xb0,0x30,0x98,0x0e,0xb0,
0x38,0x04,0x43,0xb0,0x3c,0x86,0x01,0xb0,0x09,0x0c,0x00,0x10,0x80,0xc7,0x00,0x44,
0x09,0x24,0x18,0x60,0x03,0x00,0x00,0x00,0x0c,0x1a,0x0f,0xc0,0x08,0x1c,0x00,0x10,
0x0d,0x20,0x00,0x10,0x80,0xc7,0x03,0x00,0x1d,0x84,0x0c,0x20,0x03,0x00,0x00,0x00,
0x0d,0x1c,0x0c,0xe0,0x80,0xc7,0x00,0x08,0x03,0x80,0x04,0xa0,0x00,0x00,0x00,0x00,
0x21,0x84,0x0c,0x20,0x03,0x00,0x00,0x00,0x21,0x10,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x25,0x10,0x00,0xb0,0x80,0xc7,0x00,0x00,0x25,0x0e,0x08,0xd7,0x88,0x47,0xc2,0xe0,
0xfd,0x13,0x08,0x30,0xd8,0x47,0x01,0x64,0x03,0x20,0x04,0x10,0x80,0x12,0x00,0x00,
0x1d,0x1c,0x0b,0xc0,0x82,0x07,0x00,0x00,0x1d,0x14,0x0f,0xe0,0x80,0xc7,0x01,0x08,
0x03,0x20,0x05,0xa0,0x00,0x00,0x00,0x00,0x21,0x84,0x10,0x20,0x03,0x00,0x00,0x00,
0x25,0x84,0x10,0x20,0x03,0x00,0x00,0x00,0x25,0x12,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x39,0x12,0x00,0xb0,0x80,0xc7,0x01,0x00,0x39,0x10,0x09,0xd7,0x88,0x87,0xc3,0xe0,
0xfd,0x1d,0x09,0x30,0xd8,0x47,0x01,0x64,0x03,0xc0,0x04,0x10,0x80,0x12,0x00,0x00,
0x21,0x14,0x0c,0xc0,0x82,0x07,0x00,0x00,0x21,0x1a,0x0b,0xe0,0x80,0x07,0x02,0x08,
0x03,0xc0,0x05,0xa0,0x00,0x00,0x00,0x00,0x25,0x84,0x14,0x20,0x03,0x00,0x00,0x00,
0x29,0x84,0x14,0x20,0x03,0x00,0x00,0x00,0x29,0x14,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x2d,0x14,0x00,0xb0,0x80,0x07,0x02,0x00,0x2d,0x12,0x0a,0xd7,0x88,0xc7,0xc2,0xe0,
0xfd,0x17,0x0a,0x30,0xd8,0x47,0x01,0x64,0x03,0x60,0x05,0x10,0x80,0x12,0x00,0x00,
0x09,0x0c,0x00,0x10,0x82,0xc7,0x00,0x44,0x09,0x06,0x18,0x60,0x03,0x00,0x00,0x00,
0x03,0x60,0x06,0xa0,0x00,0x00,0x00,0x00,0x25,0x84,0x0c,0x20,0x03,0x00,0x00,0x00,
0x29,0x84,0x0c,0x20,0x03,0x00,0x00,0x00,0x29,0x14,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x2d,0x14,0x00,0xb0,0x80,0xc7,0x00,0x00,0x2d,0x12,0x0a,0xd7,0x88,0xc7,0xc2,0xe0,
0xfd,0x17,0x0a,0x30,0xd8,0x47,0x01,0x64,0x03,0x00,0x06,0x10,0x80,0x12,0x00,0x00,
0x01,0x00,0x00,0xf0,0x02,0x00,0x00,0xe0,0x03,0xf0,0x06,0xa0,0x00,0x00,0x00,0x00,
0x25,0x84,0x10,0x20,0x03,0x00,0x00,0x00,0x29,0x84,0x10,0x20,0x03,0x00,0x00,0x00,
0x29,0x14,0x0e,0xd0,0x80,0x07,0xc0,0x80,0x2d,0x14,0x00,0xb0,0x80,0xc7,0x01,0x00,
0x2d,0x12,0x0a,0xd7,0x88,0xc7,0xc2,0xe0,0xfd,0x17,0x0a,0x30,0xd8,0x47,0x01,0x64,
0x03,0x90,0x06,0x10,0x80,0x12,0x00,0x00,0x01,0x00,0x00,0xf0,0x02,0x00,0x00,0xe0,
0x03,0x80,0x07,0xa0,0x00,0x00,0x00,0x00,0x25,0x84,0x14,0x20,0x03,0x00,0x00,0x00,
0x29,0x84,0x14,0x20,0x03,0x00,0x00,0x00,0x29,0x14,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x2d,0x14,0x00,0xb0,0x80,0x07,0x02,0x00,0x2d,0x12,0x0a,0xd7,0x88,0xc7,0xc2,0xe0,
0xfd,0x17,0x0a,0x30,0xd8,0x47,0x01,0x64,0x03,0x20,0x07,0x10,0x80,0x12,0x00,0x00,
0x09,0x0c,0x00,0x10,0x82,0xc7,0x00,0x44,0x05,0x04,0x80,0x60,0x80,0x87,0x40,0x00,
0x03,0x20,0x08,0xa0,0x00,0x00,0x00,0x00,0x09,0x82,0x0c,0x20,0x03,0x00,0x00,0x00,
0x25,0x82,0x0c,0x20,0x03,0x00,0x00,0x00,0x25,0x12,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x29,0x12,0x00,0xb0,0x80,0xc7,0x00,0x00,0x29,0x04,0x09,0xd7,0x88,0x87,0xc2,0xe0,
0xfd,0x15,0x09,0x30,0xd8,0x47,0x01,0x64,0x03,0xc0,0x07,0x10,0x80,0x12,0x00,0x00,
0x01,0x00,0x00,0xf0,0x02,0x00,0x00,0xe0,0x03,0xb0,0x08,0xa0,0x00,0x00,0x00,0x00,
0x09,0x82,0x10,0x20,0x03,0x00,0x00,0x00,0x0d,0x82,0x10,0x20,0x03,0x00,0x00,0x00,
0x0d,0x06,0x0e,0xd0,0x80,0x07,0xc0,0x80,0x25,0x06,0x00,0xb0,0x80,0xc7,0x01,0x00,
0x25,0x04,0x03,0xd7,0x88,0x47,0xc2,0xe0,0xfd,0x13,0x03,0x30,0xd8,0x47,0x01,0x64,
0x03,0x50,0x08,0x10,0x80,0x12,0x00,0x00,0x01,0x00,0x00,0xf0,0x02,0x00,0x00,0xe0,
0x03,0x40,0x09,0xa0,0x00,0x00,0x00,0x00,0x09,0x82,0x14,0x20,0x03,0x00,0x00,0x00,
0x0d,0x82,0x14,0x20,0x03,0x00,0x00,0x00,0x0d,0x06,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x1d,0x06,0x00,0xb0,0x80,0x07,0x02,0x00,0x1d,0x04,0x03,0xd7,0x88,0xc7,0xc1,0xe0,
0xfd,0x0f,0x03,0x30,0xd8,0x47,0x01,0x64,0x03,0xe0,0x08,0x10,0x80,0x12,0x00,0x00,
0x01,0x00,0x00,0xf0,0x02,0x00,0x00,0xe0,0x01,0x0a,0x00,0x20,0x80,0x07,0x00,0x04,
0xfd,0x01,0x06,0x30,0xd8,0x47,0x00,0x64,0x03,0x00,0x02,0x10,0x80,0x12,0x00,0x00,
0x01,0x00,0x00,0xf0,0x02,0x00,0x00,0xe0,0x03,0xfe,0x1f,0x86,0x00,0x00,0x00,0x00,
0x03,0x00,0x00,0x30,0x00,0x01,0x00,0x00,0x01,0x0c,0x00,0x10,0x80,0xc7,0x00,0x44,
0x01,0x08,0x80,0x60,0x80,0x07,0x40,0x60,0x0d,0x08,0x04,0x30,0x80,0x07,0x10,0xc4,
0x11,0xd0,0x04,0x30,0x80,0x07,0x30,0xc4,0x05,0x22,0x10,0x41,0x03,0x00,0x00,0x00,
0x09,0x22,0x18,0x41,0x03,0x00,0x00,0x00,0x0c,0xe8,0x03,0x21,0x10,0xe8,0x04,0x21,
0x15,0x06,0x0e,0xd0,0x80,0x07,0xc0,0x80,0x15,0x00,0x0e,0xd0,0x80,0x07,0xc0,0xa0,
0x15,0x86,0x04,0x20,0x03,0x00,0x00,0x00,0x15,0x0a,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x19,0x80,0x04,0x20,0x03,0x00,0x00,0x00,0x15,0x0c,0x0e,0xd0,0x80,0x07,0xc0,0xa0,
0x15,0x86,0x08,0x20,0x03,0x00,0x00,0x00,0x15,0x0a,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x19,0x80,0x08,0x20,0x03,0x00,0x00,0x00,0x15,0x0c,0x0e,0xd0,0x80,0x07,0xc0,0xa0,
0x15,0x80,0x10,0x20,0x03,0x00,0x00,0x00,0x15,0x0a,0x0e,0xd0,0x80,0x07,0xc0,0x80,
0x03,0x70,0x0c,0xa0,0x00,0x00,0x00,0x00,0x19,0x80,0x0c,0x20,0x03,0x00,0x00,0x00,
0x19,0x0c,0x0e,0xd0,0x80,0x07,0xc0,0x80,0x1d,0x80,0x14,0x20,0x03,0x00,0x00,0x00,
0x1d,0x0e,0x0e,0xd0,0x80,0x07,0xc0,0x80,0x21,0x0a,0x05,0xc0,0x80,0x07,0x00,0x00,
0x21,0x0c,0x06,0xe0,0x80,0x07,0x02,0x00,0x21,0x0e,0x07,0xe0,0x80,0x07,0x02,0x00,
0xfd,0x11,0x7c,0xb0,0xc8,0x07,0x01,0x60,0x03,0x10,0x0c,0x10,0x00,0x01,0x00,0x00,
0x21,0x10,0x00,0x90,0x80,0x07,0x00,0x40,0x20,0x10,0x00,0x90,0x20,0x10,0x00,0x90,
0x19,0x10,0x06,0xc0,0x80,0x07,0x00,0x00,0x19,0x00,0x00,0xd0,0x80,0x07,0xc0,0x60,
0x14,0x10,0x05,0xc0,0x18,0x10,0x07,0xc0,0x15,0x08,0x00,0xd0,0x80,0x07,0xc0,0x60,
0x19,0x10,0x00,0xd0,0x80,0x07,0xc0,0x60,0x05,0xf8,0x00,0x00,0x80,0x07,0x00,0xc0,
0x03,0x70,0x0c,0x10,0x80,0x07,0x00,0x00,0x15,0x80,0x00,0x10,0x03,0x00,0x00,0x00,
0x15,0x18,0x00,0xd0,0x80,0x07,0xc0,0x60,0x15,0x20,0x00,0xd0,0x80,0x07,0xc0,0x60,
0x19,0x80,0x0c,0x10,0x03,0x00,0x00,0x00,0x15,0x28,0x00,0xd0,0x80,0x07,0xc0,0x60,
0x05,0x0c,0x00,0x00,0x80,0x07,0x00,0xc0,0x15,0x00,0x00,0xd4,0x82,0x07,0xc0,0x40,
0x1d,0x08,0x00,0xd4,0x80,0x07,0xc0,0x40,0x19,0x10,0x00,0xd4,0x80,0x07,0xc0,0x40,
0x21,0x80,0x0c,0x20,0x03,0x00,0x00,0x00,0x15,0x10,0x0e,0xd0,0x80,0x07,0xc0,0xa0,
0x15,0x80,0x10,0x20,0x03,0x00,0x00,0x00,0x0d,0x02,0x00,0x20,0x80,0xc7,0x00,0x04,
0x1d,0x0a,0x0e,0xd0,0x80,0x07,0xc0,0xa0,0x15,0x80,0x14,0x20,0x03,0x00,0x00,0x00,
0xfd,0x07,0x04,0x30,0xc8,0x47,0x00,0x64,0x19,0x0a,0x0e,0xd0,0x80,0x07,0xc0,0xa0,
0x01,0x00,0x00,0x20,0x80,0x87,0x00,0x04,0x03,0x20,0x0a,0x10,0x80,0x02,0x00,0x00,
0x01,0x00,0x00,0xf0,0x01,0x00,0x00,0xe0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x34,0x00,0x00,0x00,0xb6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x03,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xea,0x00,0x00,0x00,0xff,0x01,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x13,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xec,0x02,0x00,0x00,0x60,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x70,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x4c,0x03,0x00,0x00,0x60,0x00,0x00,0x00,
0x03,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x74,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xac,0x03,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
0x06,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0xb8,0x03,0x00,0x00,0xa8,0x06,0x00,0x00,
0x03,0x00,0x00,0x00,0x05,0x00,0x00,0x10,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x54,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x60,0x0a,0x00,0x00,0x24,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x97,0x00,0x00,0x00,0x08,0x00,0x00,0x00,
0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x0a,0x00,0x00,0x18,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x06,0x00,0x00,0x00,0xc8,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x60,0x00,0x00,0x00,0x60,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0xac,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xb4,0x06,0x00,0x00,0xb4,0x06,0x00,0x00,0x05,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0x60,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x24,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
0x02,0x00,0x01,0x01,0x58,0x00,0x00,0x00,0x70,0x07,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x01,0x00,0x14,0x00,0x00,0x00,
0x40,0x00,0x00,0x00,0x15,0x00,0x00,0x00,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x52,0x65,0x6e,0x64,0x65,0x72,0x43,0x6c,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,
0x6c,0x73,0x2e,0x63,0x75,0x00,0x00,0x00,0x7f,0x45,0x4c,0x46,0x01,0x01,0x01,0x33,
0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0xbe,0x00,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x0c,0x07,0x00,0x00,0xcc,0x05,0x00,0x00,0x14,0x01,0x14,0x00,
0x34,0x00,0x20,0x00,0x03,0x00,0x28,0x00,0x08,0x00,0x01,0x00,0x00,0x2e,0x73,0x68,
0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,
0x73,0x79,0x6d,0x74,0x61,0x62,0x00,0x2e,0x74,0x65,0x78,0x74,0x2e,0x63,0x6f,0x6d,
0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,
0x73,0x00,0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x2e,0x63,0x6f,0x6d,0x70,0x75,
0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,
0x2e,0x6e,0x76,0x2e,0x73,0x68,0x61,0x72,0x65,0x64,0x2e,0x63,0x6f,0x6d,0x70,0x75,
0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,
0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x00,0x2e,0x6e,0x76,0x2e,0x63,0x6f,0x6e,
0x73,0x74,0x61,0x6e,0x74,0x30,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,
0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x00,0x2e,0x73,0x68,
0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,
0x73,0x79,0x6d,0x74,0x61,0x62,0x00,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,
0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x74,0x65,0x78,
0x74,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,
0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x2e,
0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,
0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x73,0x68,0x61,0x72,0x65,0x64,0x2e,
0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,
0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x00,0x2e,0x6e,
0x76,0x2e,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x30,0x2e,0x63,0x6f,0x6d,0x70,
0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,
0x00,0x5f,0x70,0x61,0x72,0x61,0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x03,0x00,0x07,0x00,0x92,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x03,0x00,0x06,0x00,0x1b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x48,0x03,0x00,0x00,0x12,0x10,0x07,0x00,0x04,0x0a,0x08,0x00,0x02,0x00,0x00,0x00,
0x20,0x00,0x14,0x00,0x03,0x19,0x14,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x04,0x00,0x10,0x00,0x00,0xf0,0x11,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x03,0x00,0x0c,0x00,0x00,0xf0,0x11,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x02,0x00,0x08,0x00,0x00,0xf0,0x11,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x01,0x00,0x04,0x00,0x00,0xf0,0x11,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xf0,0x11,0x00,0x04,0x1e,0x04,0x00,0x8c,0x01,0x00,0x00,
0x04,0x12,0x08,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x11,0x08,0x00,
0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0x5d,0x00,0x00,
0x04,0x44,0x00,0x28,0x04,0x1c,0x00,0x84,0x00,0x00,0x00,0x2c,0x07,0x00,0x00,0xa0,
0x01,0x00,0x00,0x60,0x03,0xdc,0x01,0xc0,0x00,0x40,0x0e,0x1b,0xe7,0x01,0x00,0x40,
0x01,0x00,0x00,0x40,0xe4,0x1d,0x01,0xa0,0x00,0x40,0x00,0x28,0xe4,0x9d,0x00,0x00,
0x00,0x00,0x00,0x28,0x03,0xdc,0x1f,0x10,0x00,0x00,0x7e,0x20,0xa3,0xdc,0x20,0x60,
0x00,0xc0,0x08,0x20,0x03,0x9c,0x20,0x20,0x00,0x40,0x00,0x48,0x85,0xdc,0x3f,0x30,
0x00,0x00,0x00,0x90,0x03,0xdc,0x21,0xc0,0x00,0x40,0x8e,0x18,0x85,0xdc,0x3f,0x40,
0x00,0x00,0x00,0x90,0x85,0xdc,0x3f,0x50,0x00,0x00,0x00,0x90,0xe7,0x01,0x00,0x20,
0xff,0xff,0x03,0x40,0x13,0xdc,0x1f,0xfc,0x00,0x00,0x7e,0x20,0x07,0x00,0x00,0x80,
0x06,0x00,0x00,0x60,0x04,0xdc,0xff,0xff,0x00,0x00,0xee,0x50,0x03,0xdc,0x01,0xb0,
0x00,0x40,0x0e,0x1b,0xe7,0x01,0x00,0x00,0x06,0x00,0x00,0x40,0xe4,0x5d,0x02,0xa0,
0x00,0x40,0x00,0x28,0xe4,0x9d,0x00,0x00,0x00,0x00,0x00,0x28,0x03,0xdc,0x9f,0xfc,
0x00,0x00,0x7e,0x20,0xe4,0x1d,0x00,0x00,0x00,0x00,0x00,0x40,0xa3,0xdc,0x20,0x0c,
0x00,0xc0,0x00,0x50,0x03,0x9c,0x20,0x20,0x00,0x40,0x00,0x48,0x23,0xdc,0x30,0x90,
0x00,0x40,0x00,0x40,0x03,0xdc,0x21,0xb0,0x00,0x40,0x8e,0x18,0x45,0xdc,0x33,0x10,
0x00,0x00,0x00,0x80,0x45,0x1c,0x32,0x08,0x00,0x00,0x00,0x80,0x45,0x9c,0x32,0x00,
0x00,0x00,0x00,0x80,0x83,0x9c,0xf1,0x80,0x00,0x40,0x00,0x40,0x83,0x5c,0x81,0x80,
0x00,0x40,0x00,0x40,0x83,0x1c,0xa4,0x80,0x00,0x40,0x00,0x40,0x85,0x5c,0x63,0x00,
0x00,0x00,0x00,0x80,0x85,0xdc,0x61,0x10,0x00,0x00,0x00,0x80,0x85,0x9c,0x53,0x00,
0x00,0x00,0x00,0x80,0x85,0xdc,0x52,0x20,0x00,0x00,0x00,0x80,0x85,0xdc,0x00,0x11,
0x00,0x00,0x00,0x80,0x85,0x1c,0x01,0x21,0x00,0x00,0x00,0x80,0x85,0x1c,0x03,0x01,
0x00,0x00,0x00,0x80,0x85,0x9c,0x61,0x20,0x00,0x00,0x00,0x80,0x85,0x5c,0x51,0x10,
0x00,0x00,0x00,0x80,0xa3,0x9c,0xa2,0x60,0x00,0xc0,0x12,0x20,0xa3,0x1c,0x82,0x60,
0x00,0xc0,0x12,0x20,0xa3,0xdc,0xf3,0x60,0x00,0xc0,0x12,0x20,0x20,0xdd,0x71,0x0c,
0x00,0x00,0x00,0x50,0x20,0xdd,0xb2,0x10,0x00,0x00,0x00,0x50,0x20,0x9d,0xe3,0x30,
0x00,0x00,0x00,0x50,0x20,0x1d,0x61,0x10,0x00,0x00,0x00,0x50,0x20,0x1d,0xd3,0x30,
0x00,0x00,0x00,0x50,0x20,0x5d,0x51,0x0c,0x00,0x00,0x00,0x50,0x40,0xdc,0x70,0x2c,
0x00,0x00,0x00,0x58,0x40,0x9c,0x41,0x38,0x00,0x00,0x00,0x58,0x40,0x5c,0xc3,0x14,
0x00,0x00,0x00,0x58,0x40,0xde,0x40,0x14,0x00,0x00,0x06,0x30,0x40,0x1e,0xc1,0x2c,
0x00,0x00,0x0c,0x30,0x40,0x5e,0x71,0x38,0x00,0x00,0x1a,0x30,0x05,0xde,0xa0,0x30,
0x00,0x00,0x00,0x28,0x05,0x1e,0xa1,0x40,0x00,0x00,0x00,0x28,0x05,0x5e,0xa1,0x50,
0x00,0x00,0x00,0x28,0x05,0xde,0x80,0x30,0x00,0x00,0x00,0x28,0x05,0x1e,0x81,0x40,
0x00,0x00,0x00,0x28,0x05,0x5e,0x81,0x50,0x00,0x00,0x00,0x28,0x05,0xde,0xf0,0x30,
0x00,0x00,0x00,0x28,0x05,0x1e,0xf1,0x40,0x00,0x00,0x00,0x28,0x05,0x5e,0xf1,0x50,
0x00,0x00,0x00,0x28,0xe7,0x01,0x00,0x80,0xfa,0xff,0x03,0x40,0x13,0xdc,0x01,0xc0,
0x00,0x40,0x8e,0x18,0x04,0xdc,0xff,0x07,0x00,0xc0,0x00,0x30,0x04,0xdc,0xff,0xff,
0x00,0x00,0xee,0x50,0xe7,0x21,0x00,0x00,0x00,0x00,0x00,0x80,0xe4,0x9d,0x02,0xa0,
0x00,0x40,0x00,0x28,0x03,0xdc,0xaf,0xfc,0x00,0x00,0x7e,0x20,0x83,0x1c,0x02,0x80,
0x00,0x40,0x00,0x40,0xa3,0xdc,0x01,0x60,0x00,0xc0,0x14,0x20,0x85,0x5c,0x81,0x00,
0x00,0x00,0x00,0x80,0x85,0x5c,0x71,0x00,0x00,0x00,0x00,0x90,0x85,0x9c,0x81,0x10,
0x00,0x00,0x00,0x80,0x85,0xdc,0x70,0x40,0x00,0x00,0x00,0x80,0x85,0x9c,0x70,0x30,
0x00,0x00,0x00,0x80,0x85,0x1c,0x71,0x50,0x00,0x00,0x00,0x80,0x85,0x9c,0x71,0x10,
0x00,0x00,0x00,0x90,0x85,0x5c,0x81,0x20,0x00,0x00,0x00,0x80,0x40,0x5c,0x32,0x0c,
0x00,0x00,0x00,0x58,0x40,0x5c,0x22,0x08,0x00,0x00,0x12,0x30,0x40,0x5c,0x42,0x10,
0x00,0x00,0x12,0x30,0x00,0xdc,0x91,0xfc,0x00,0x00,0x0e,0x2a,0x85,0x5c,0x71,0x20,
0x00,0x00,0x00,0x90,0xe7,0x81,0x00,0x80,0x00,0x00,0x00,0x40,0xe4,0xa1,0x00,0xfc,
0x00,0x00,0x00,0x28,0xe4,0xe1,0x00,0xfc,0x00,0x00,0x00,0x28,0xe4,0x21,0x01,0xfc,
0x00,0x00,0x00,0x28,0xe7,0xa1,0x00,0x80,0x00,0x00,0x00,0x40,0x00,0x40,0x91,0x14,
0x00,0x00,0x00,0xc8,0x40,0x80,0x20,0x14,0x00,0x00,0x00,0x58,0x40,0xc0,0x30,0x14,
0x00,0x00,0x00,0x58,0x40,0x00,0x41,0x14,0x00,0x00,0x00,0x58,0x03,0x1c,0x00,0x20,
0x00,0x40,0x00,0x48,0x85,0x9c,0x70,0x30,0x00,0x00,0x00,0x90,0x85,0xdc,0x70,0x40,
0x00,0x00,0x00,0x90,0x03,0xdc,0x01,0xc0,0x00,0x40,0x8e,0x18,0x85,0x1c,0x71,0x50,
0x00,0x00,0x00,0x90,0xe7,0x01,0x00,0x40,0xfc,0xff,0x03,0x40,0xe7,0x1d,0x00,0x00,
0x00,0x00,0x00,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x34,0x00,0x00,0x00,
0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xd4,0x00,0x00,0x00,0xbc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x13,0x00,0x00,0x00,
0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0x01,0x00,0x00,
0x40,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
0x10,0x00,0x00,0x00,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x70,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xd0,0x01,0x00,0x00,0x68,0x00,0x00,0x00,0x03,0x00,0x00,0x00,
0x07,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x74,0x00,0x00,0x00,
0x00,0x00,0x00,0x70,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x38,0x02,0x00,0x00,
0x18,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x7d,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x02,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x02,0x00,0x00,0x34,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x07,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0x06,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0x84,0x02,0x00,0x00,
0x48,0x03,0x00,0x00,0x03,0x00,0x00,0x00,0x03,0x00,0x00,0x11,0x04,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x0c,0x07,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x60,0x00,0x00,0x00,0x60,0x00,0x00,0x00,0x05,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x50,0x02,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x7c,0x03,0x00,0x00,0x7c,0x03,0x00,0x00,0x05,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x01,0x01,0x58,0x00,0x00,0x00,
0x20,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x07,0x00,0x01,0x00,0x1e,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x15,0x00,0x00,0x00,
0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x65,0x6e,0x64,0x65,0x72,0x43,0x6c,
0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x2e,0x63,0x75,0x00,0x00,0x00,
0x7f,0x45,0x4c,0x46,0x01,0x01,0x01,0x33,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x02,0x00,0xbe,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0x08,0x00,0x00,
0x80,0x07,0x00,0x00,0x1e,0x01,0x1e,0x00,0x34,0x00,0x20,0x00,0x03,0x00,0x28,0x00,
0x08,0x00,0x01,0x00,0x00,0x2e,0x73,0x68,0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,
0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,0x73,0x79,0x6d,0x74,0x61,0x62,0x00,0x2e,
0x74,0x65,0x78,0x74,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,
0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x69,0x6e,
0x66,0x6f,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,
0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x73,0x68,0x61,0x72,
0x65,0x64,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,
0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,
0x00,0x2e,0x6e,0x76,0x2e,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x30,0x2e,0x63,
0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,
0x61,0x6c,0x73,0x00,0x00,0x2e,0x73,0x68,0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,
0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,0x73,0x79,0x6d,0x74,0x61,0x62,0x00,0x63,
0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,
0x61,0x6c,0x73,0x00,0x2e,0x74,0x65,0x78,0x74,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,
0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,
0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,
0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,
0x2e,0x73,0x68,0x61,0x72,0x65,0x64,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,
0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,
0x2e,0x69,0x6e,0x66,0x6f,0x00,0x2e,0x6e,0x76,0x2e,0x63,0x6f,0x6e,0x73,0x74,0x61,
0x6e,0x74,0x30,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,
0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x5f,0x70,0x61,0x72,0x61,0x6d,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x07,0x00,
0x92,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x06,0x00,
0x1b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0x03,0x00,0x00,0x12,0x10,0x07,0x00,
0x04,0x0a,0x08,0x00,0x02,0x00,0x00,0x00,0x40,0x01,0x14,0x00,0x03,0x19,0x14,0x00,
0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x10,0x00,0x00,0xf0,0x11,0x00,
0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x0c,0x00,0x00,0xf0,0x11,0x00,
0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x08,0x00,0x00,0xf0,0x11,0x00,
0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x04,0x00,0x00,0xf0,0x11,0x00,
0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf0,0x11,0x00,
0x04,0x1e,0x04,0x00,0x10,0x02,0x00,0x00,0x04,0x12,0x08,0x00,0x03,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x04,0x11,0x08,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xf7,0x42,0x80,0xc2,0x22,0x42,0xf0,0x22,0xe4,0x5d,0x00,0x10,0x01,0x40,0x00,0x28,
0x04,0x1c,0x00,0x84,0x00,0x00,0x00,0x2c,0x07,0x00,0x00,0xc0,0x01,0x00,0x00,0x60,
0x03,0xdc,0x01,0x40,0x05,0x40,0x0e,0x1b,0xe7,0x01,0x00,0x40,0x01,0x00,0x00,0x40,
0xe4,0x9d,0x00,0x00,0x00,0x00,0x00,0x28,0xe4,0x1d,0x01,0x20,0x05,0x40,0x00,0x28,
0x47,0x80,0x42,0xe0,0xe2,0x42,0xe0,0x22,0xa3,0xdc,0x20,0x60,0x00,0xc0,0x08,0x20,
0x03,0x9c,0x20,0xa0,0x00,0x40,0x00,0x48,0x85,0xdc,0x3f,0x30,0x00,0x00,0x00,0x90,
0x03,0xdc,0x21,0x40,0x05,0x40,0x8e,0x18,0x85,0xdc,0x3f,0x40,0x00,0x00,0x00,0x90,
0x85,0xdc,0x3f,0x50,0x00,0x00,0x00,0x90,0xe7,0x01,0x00,0x00,0xff,0xff,0x03,0x40,
0x07,0x10,0x43,0xc0,0x22,0x42,0xf0,0x22,0xf4,0x1d,0x00,0x00,0x00,0x00,0x00,0x40,
0x04,0x1c,0x00,0x00,0x00,0xc0,0x0e,0x50,0x03,0xdc,0x01,0x30,0x05,0x40,0x0e,0x1b,
0x07,0x00,0x00,0xe0,0x06,0x00,0x00,0x60,0xe7,0x01,0x00,0xa0,0x06,0x00,0x00,0x40,
0xe4,0x9d,0x00,0x00,0x00,0x00,0x00,0x28,0xe4,0xdd,0x00,0x20,0x05,0x40,0x00,0x28,
0x47,0x80,0x82,0x42,0x70,0x43,0x30,0x22,0xa3,0x1c,0x21,0x0c,0x00,0xc0,0x00,0x50,
0x03,0x9c,0x20,0xa0,0x00,0x40,0x00,0x48,0x23,0x9c,0x41,0x10,0x05,0x40,0x00,0x40,
0x45,0xdc,0x63,0x00,0x00,0x00,0x00,0x80,0x03,0xdc,0x21,0x30,0x05,0x40,0x8e,0x18,
0x45,0x1c,0x61,0x08,0x00,0x00,0x00,0x80,0x83,0x5c,0xf4,0x00,0x05,0x40,0x00,0x40,
0x37,0x43,0x30,0x42,0x40,0x42,0x70,0x23,0x45,0x5c,0x61,0x10,0x00,0x00,0x00,0x80,
0x85,0xdc,0x12,0x21,0x00,0x00,0x00,0x80,0x83,0x9c,0x44,0x00,0x05,0x40,0x00,0x40,
0x83,0x1c,0x54,0x00,0x05,0x40,0x00,0x40,0x85,0x5c,0x13,0x11,0x00,0x00,0x00,0x80,
0x85,0x1c,0x23,0x21,0x00,0x00,0x00,0x80,0xa3,0x1c,0x41,0x60,0x00,0xc0,0x06,0x20,
0x47,0x70,0x43,0x30,0x32,0x32,0xf2,0x22,0x85,0x9c,0x03,0x11,0x00,0x00,0x00,0x80,
0x20,0xdd,0xc4,0x2c,0x00,0x00,0x00,0x50,0x85,0x9c,0x02,0x21,0x00,0x00,0x00,0x80,
0x20,0x1d,0xe3,0x34,0x00,0x00,0x00,0x50,0x85,0x5c,0x22,0x11,0x00,0x00,0x00,0x80,
0x85,0xdc,0x11,0x01,0x00,0x00,0x00,0x80,0x85,0x1c,0x22,0x01,0x00,0x00,0x00,0x80,
0x47,0x00,0x02,0x52,0x42,0x80,0x82,0x22,0x20,0x5d,0x93,0x34,0x00,0x00,0x00,0x50,
0x85,0x9c,0x01,0x01,0x00,0x00,0x00,0x80,0xa3,0x5c,0xf2,0x60,0x00,0xc0,0x06,0x20,
0x20,0xdd,0xa2,0x2c,0x00,0x00,0x00,0x50,0x40,0x9c,0xc2,0x4c,0x00,0x00,0x00,0x58,
0x20,0xdd,0x83,0x1c,0x00,0x00,0x00,0x50,0x40,0x1e,0xb2,0x34,0x00,0x00,0x14,0x30,
0x47,0x00,0x82,0x42,0x30,0x42,0x02,0x22,0x20,0x9d,0x62,0x1c,0x00,0x00,0x00,0x50,
0x05,0x1e,0x92,0x30,0x00,0x00,0x00,0x28,0x40,0x9c,0xb1,0x3c,0x00,0x00,0x00,0x58,
0x40,0x9e,0xa1,0x4c,0x00,0x00,0x0c,0x30,0x40,0x9c,0xa2,0x34,0x00,0x00,0x00,0x58,
0xa3,0x5c,0x51,0x60,0x00,0xc0,0x06,0x20,0x05,0x9e,0x91,0x40,0x00,0x00,0x00,0x28,
0xd7,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0x22,0x40,0xde,0xc1,0x3c,0x00,0x00,0x14,0x30,
0x05,0xde,0x91,0x50,0x00,0x00,0x00,0x28,0x05,0x1e,0x42,0x30,0x00,0x00,0x00,0x28,
0x05,0x9e,0x41,0x40,0x00,0x00,0x00,0x28,0x05,0xde,0x41,0x50,0x00,0x00,0x00,0x28,
0x05,0x1e,0x52,0x30,0x00,0x00,0x00,0x28,0x05,0x9e,0x51,0x40,0x00,0x00,0x00,0x28,
0x47,0xe0,0xf2,0x12,0xe3,0x42,0xf0,0x22,0x05,0xde,0x51,0x50,0x00,0x00,0x00,0x28,
0xe7,0x01,0x00,0xa0,0xf9,0xff,0x03,0x40,0x13,0xdc,0x01,0x40,0x05,0x40,0x8e,0x18,
0x04,0x1c,0x00,0x00,0x00,0xc0,0x0e,0x50,0xe7,0x21,0x00,0x00,0x00,0x00,0x00,0x80,
0xe4,0xdd,0x02,0x20,0x05,0x40,0x00,0x28,0xe4,0x1d,0x00,0x00,0x00,0x00,0x00,0x40,
0x87,0x42,0x70,0xe3,0x72,0xe3,0x72,0x23,0x83,0xdc,0x01,0x00,0x05,0x40,0x00,0x40,
0x85,0x9c,0x70,0x00,0x00,0x00,0x00,0x80,0xa3,0x9c,0x01,0x60,0x00,0xc0,0x16,0x20,
0x85,0x9c,0x60,0x00,0x00,0x00,0x00,0x90,0x85,0x5c,0x71,0x10,0x00,0x00,0x00,0x80,
0x85,0x5c,0x61,0x10,0x00,0x00,0x00,0x90,0x85,0xdc,0x60,0x40,0x00,0x00,0x00,0x80,
0x47,0x70,0x43,0x70,0x43,0x80,0xe2,0x22,0x85,0x9c,0x60,0x30,0x00,0x00,0x00,0x80,
0x40,0x5c,0x32,0x0c,0x00,0x00,0x00,0x58,0x85,0x1c,0x61,0x50,0x00,0x00,0x00,0x80,
0x40,0x1c,0x22,0x08,0x00,0x00,0x12,0x30,0x85,0x5c,0x71,0x20,0x00,0x00,0x00,0x80,
0x40,0x1c,0x42,0x10,0x00,0x00,0x10,0x30,0x00,0xdc,0x81,0xfc,0x00,0x00,0x0e,0x2a,
0x47,0x00,0x40,0x00,0x42,0x00,0x80,0x22,0x85,0x5c,0x61,0x20,0x00,0x00,0x00,0x90,
0xe7,0x81,0x00,0x80,0x00,0x00,0x00,0x40,0xe4,0xa1,0x00,0xfc,0x00,0x00,0x00,0x28,
0xe4,0xe1,0x00,0xfc,0x00,0x00,0x00,0x28,0xe4,0x21,0x01,0xfc,0x00,0x00,0x00,0x28,
0xe7,0xa1,0x00,0xa0,0x00,0x00,0x00,0x40,0x00,0x40,0x81,0x14,0x00,0x00,0x00,0xc8,
0x47,0x00,0x02,0x00,0xe2,0x42,0xe0,0x22,0x40,0x80,0x20,0x14,0x00,0x00,0x00,0x58,
0x40,0xc0,0x30,0x14,0x00,0x00,0x00,0x58,0x40,0x00,0x41,0x14,0x00,0x00,0x00,0x58,
0x03,0x1c,0x00,0xa0,0x00,0x40,0x00,0x48,0x85,0x9c,0x60,0x30,0x00,0x00,0x00,0x90,
0x03,0xdc,0x01,0x40,0x05,0x40,0x8e,0x18,0x85,0xdc,0x60,0x40,0x00,0x00,0x00,0x90,
0x47,0xe0,0xe2,0x02,0x00,0x00,0x00,0x20,0x85,0x1c,0x61,0x50,0x00,0x00,0x00,0x90,
0xe7,0x01,0x00,0xa0,0xfb,0xff,0x03,0x40,0xe7,0x1d,0x00,0x00,0x00,0x00,0x00,0x80,
0xe7,0x1d,0x00,0xe0,0xff,0xff,0x03,0x40,0xe4,0x1d,0x00,0x00,0x00,0x00,0x00,0x40,
0xe4,0x1d,0x00,0x00,0x00,0x00,0x00,0x40,0xe4,0x1d,0x00,0x00,0x00,0x00,0x00,0x40,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x34,0x00,0x00,0x00,0xa0,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x0b,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xd4,0x00,0x00,0x00,0xbc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x13,0x00,0x00,0x00,0x02,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x90,0x01,0x00,0x00,0x40,0x00,0x00,0x00,
0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x10,0x00,0x00,0x00,
0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x70,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xd0,0x01,0x00,0x00,0x68,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x07,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x74,0x00,0x00,0x00,0x00,0x00,0x00,0x70,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x38,0x02,0x00,0x00,0x18,0x00,0x00,0x00,
0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x7d,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x50,0x02,0x00,0x00,0x54,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1b,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
0x06,0x00,0x10,0x00,0x00,0x00,0x00,0x00,0xc0,0x03,0x00,0x00,0xc0,0x03,0x00,0x00,
0x03,0x00,0x00,0x00,0x03,0x00,0x00,0x14,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x06,0x00,0x00,0x00,0xc0,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x60,0x00,0x00,0x00,0x60,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0x50,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x14,0x05,0x00,0x00,0x14,0x05,0x00,0x00,0x05,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0x04,0x00,0x00,0x00,
0x02,0x00,0x01,0x01,0x58,0x00,0x00,0x00,0x20,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x00,0x01,0x00,0x32,0x00,0x00,0x00,
0x40,0x00,0x00,0x00,0x15,0x00,0x00,0x00,0x44,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x52,0x65,0x6e,0x64,0x65,0x72,0x43,0x6c,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,
0x6c,0x73,0x2e,0x63,0x75,0x00,0x00,0x00,0x7f,0x45,0x4c,0x46,0x01,0x01,0x01,0x33,
0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0xbe,0x00,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xc0,0x09,0x00,0x00,0x80,0x08,0x00,0x00,0x32,0x01,0x32,0x00,
0x34,0x00,0x20,0x00,0x03,0x00,0x28,0x00,0x08,0x00,0x01,0x00,0x00,0x2e,0x73,0x68,
0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,
0x73,0x79,0x6d,0x74,0x61,0x62,0x00,0x2e,0x74,0x65,0x78,0x74,0x2e,0x63,0x6f,0x6d,
0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,
0x73,0x00,0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x2e,0x63,0x6f,0x6d,0x70,0x75,
0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,
0x2e,0x6e,0x76,0x2e,0x73,0x68,0x61,0x72,0x65,0x64,0x2e,0x63,0x6f,0x6d,0x70,0x75,
0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,
0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x00,0x2e,0x6e,0x76,0x2e,0x63,0x6f,0x6e,
0x73,0x74,0x61,0x6e,0x74,0x30,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,
0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x00,0x2e,0x73,0x68,
0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,0x73,0x74,0x72,0x74,0x61,0x62,0x00,0x2e,
0x73,0x79,0x6d,0x74,0x61,0x62,0x00,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,
0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x74,0x65,0x78,
0x74,0x2e,0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,
0x6f,0x72,0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x2e,
0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,
0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x73,0x68,0x61,0x72,0x65,0x64,0x2e,
0x63,0x6f,0x6d,0x70,0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,
0x6d,0x61,0x6c,0x73,0x00,0x2e,0x6e,0x76,0x2e,0x69,0x6e,0x66,0x6f,0x00,0x2e,0x6e,
0x76,0x2e,0x63,0x6f,0x6e,0x73,0x74,0x61,0x6e,0x74,0x30,0x2e,0x63,0x6f,0x6d,0x70,
0x75,0x74,0x65,0x53,0x6d,0x6f,0x6f,0x74,0x68,0x4e,0x6f,0x72,0x6d,0x61,0x6c,0x73,
0x00,0x5f,0x70,0x61,0x72,0x61,0x6d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x03,0x00,0x07,0x00,0x92,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x03,0x00,0x06,0x00,0x1b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xc0,0x04,0x00,0x00,0x12,0x10,0x07,0x00,0x04,0x0a,0x08,0x00,0x02,0x00,0x00,0x00,
0x40,0x01,0x14,0x00,0x03,0x19,0x14,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x04,0x00,0x10,0x00,0x00,0xf0,0x11,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x03,0x00,0x0c,0x00,0x00,0xf0,0x11,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x02,0x00,0x08,0x00,0x00,0xf0,0x11,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x01,0x00,0x04,0x00,0x00,0xf0,0x11,0x00,0x04,0x17,0x0c,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xf0,0x11,0x00,0x04,0x1e,0x04,0x00,0x10,0x02,0x00,0x00,
0x04,0x1c,0x10,0x00,0x48,0x02,0x00,0x00,0x60,0x03,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x04,0x12,0x08,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x04,0x11,0x08,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe6,0x07,0x20,0xe2,0x00,0xf4,0x1f,0x00,
0x01,0x00,0x87,0x00,0x80,0x07,0x98,0x4c,0x00,0x00,0x17,0x02,0x00,0x00,0xc8,0xf0,
0x00,0x00,0x00,0x09,0x00,0x00,0x90,0xe2,0xed,0x0f,0xa0,0xff,0x00,0x98,0x1f,0x00,
0x07,0x00,0x47,0x05,0x80,0x03,0x6c,0x4b,0x0f,0x00,0x00,0x07,0x00,0x00,0x40,0xe2,
0x02,0x00,0x07,0x00,0x80,0x07,0x98,0x5c,0xe6,0x07,0xc0,0xfc,0x00,0x88,0x1f,0x00,
0x03,0x00,0x27,0x05,0x80,0x07,0x98,0x4c,0x03,0x02,0x87,0x01,0x80,0x01,0x00,0x36,
0x03,0x02,0x87,0x01,0x90,0x01,0x20,0x36,0xf0,0x07,0x20,0x1e,0x00,0xc4,0x07,0x00,
0x02,0x02,0x27,0x00,0x00,0x00,0x10,0x4c,0xff,0x03,0xc7,0x00,0x00,0x00,0xdc,0xee,
0xff,0x03,0x07,0x01,0x00,0x00,0xdc,0xee,0xf4,0x02,0xa0,0xfd,0x00,0xf4,0xff,0x00,
0xff,0x03,0x47,0x01,0x00,0x00,0xdc,0xee,0x07,0x02,0x47,0x05,0x80,0x03,0x62,0x4b,
0x0f,0x00,0x80,0xfa,0xff,0x0f,0x40,0xe2,0xfd,0x07,0xe0,0xff,0x00,0x84,0x1f,0x00,
0x0f,0x00,0x07,0x00,0x00,0x00,0xf8,0xf0,0x00,0x00,0x07,0x00,0x80,0x1b,0xa8,0xf0,
0x07,0x00,0x37,0x05,0x80,0x03,0x6c,0x4b,0xec,0x07,0xa0,0xff,0x00,0x98,0x1f,0x00,
0x00,0x00,0x80,0x22,0x00,0x00,0x90,0xe2,0x0f,0x00,0x80,0x21,0x00,0x00,0x40,0xe2,
0x02,0x00,0x07,0x00,0x80,0x07,0x98,0x5c,0xe6,0x07,0xc0,0xfc,0x00,0x88,0x1f,0x00,
0x03,0x02,0x37,0x00,0x80,0x7f,0x00,0x36,0x03,0x02,0x37,0x00,0x90,0x01,0x20,0x36,
0x05,0x03,0x17,0x05,0x80,0x00,0x18,0x4c,0xb1,0x07,0x20,0xf6,0x00,0xc8,0x1e,0x00,
0x0e,0x05,0x07,0x00,0x00,0x00,0xd2,0xee,0x04,0x05,0x27,0x00,0x00,0x00,0xd2,0xee,
0x03,0x05,0x47,0x00,0x00,0x00,0xd2,0xee,0xe2,0x07,0x21,0xfe,0x00,0xc4,0x1f,0x00,
0x08,0x0e,0x07,0x05,0x00,0x02,0x18,0x4c,0x06,0x08,0x07,0x00,0x00,0x00,0xd4,0xee,
0x0d,0x08,0x47,0x00,0x00,0x00,0xd4,0xee,0xf1,0x00,0x40,0xfc,0x00,0xc0,0x1f,0x00,
0x0a,0x08,0x87,0x00,0x00,0x00,0xd4,0xee,0x10,0x04,0x07,0x05,0x00,0x02,0x18,0x4c,
0x0f,0x03,0x07,0x05,0x00,0x02,0x18,0x4c,0xb1,0x07,0x20,0xfe,0x00,0xc4,0x1e,0x00,
0x0b,0x10,0x87,0x00,0x00,0x00,0xd4,0xee,0x07,0x10,0x07,0x00,0x00,0x00,0xd4,0xee,
0x09,0x0f,0x87,0x00,0x00,0x00,0xd4,0xee,0xb1,0x07,0xa0,0x37,0x00,0xc4,0x3e,0x00,
0x0c,0x0f,0x47,0x00,0x00,0x00,0xd4,0xee,0x05,0x0f,0x07,0x00,0x00,0x00,0xd4,0xee,
0x08,0x10,0x47,0x00,0x00,0x00,0xd4,0xee,0xe1,0x07,0xc0,0xfc,0x00,0x84,0x1f,0x00,
0x15,0x00,0x27,0x05,0x80,0x07,0x98,0x4c,0x02,0x02,0x27,0x00,0x00,0x00,0x10,0x4c,
0x07,0x02,0x37,0x05,0x80,0x03,0x62,0x4b,0xe1,0x07,0x21,0xfc,0x02,0x84,0x1f,0x00,
0x0b,0x0b,0xa7,0x00,0x00,0x30,0x58,0x5c,0x0f,0x09,0xa7,0x00,0x00,0x30,0x58,0x5c,
0x0a,0x07,0x67,0x00,0x00,0x30,0x58,0x5c,0xe1,0x07,0x20,0xfc,0x00,0x8c,0x1f,0x00,
0x09,0x0c,0xd7,0x00,0x00,0x30,0x58,0x5c,0x11,0x05,0x67,0x00,0x00,0x30,0x58,0x5c,
0x05,0x0e,0x87,0x01,0x80,0x0a,0x00,0x36,0xe1,0x07,0x20,0xfc,0x00,0x84,0x1f,0x00,
0x0c,0x0f,0xa7,0x00,0x00,0x10,0x68,0x5c,0x08,0x08,0xd7,0x00,0x00,0x30,0x58,0x5c,
0x06,0x09,0xb7,0x00,0x00,0x10,0x68,0x5c,0xe3,0x07,0x20,0xfc,0x00,0x84,0x1f,0x00,
0x0d,0x04,0x87,0x01,0x80,0x0a,0x00,0x36,0x07,0x11,0xb7,0x00,0x00,0x06,0xa1,0x59,
0x10,0x11,0x87,0x00,0x00,0x10,0x68,0x5c,0xe1,0x07,0x40,0xfc,0x00,0xc0,0x1f,0x00,
0x05,0x0e,0x87,0x01,0x90,0x02,0x20,0x36,0x06,0x0f,0x87,0x00,0x00,0x03,0xa1,0x59,
0x0c,0x03,0x87,0x01,0x80,0x0a,0x00,0x36,0xf1,0x00,0x00,0xfe,0x00,0xc4,0x07,0x00,
0x06,0x05,0x37,0xc0,0x00,0x00,0xf8,0xeb,0x04,0x04,0x87,0x01,0x90,0x06,0x20,0x36,
0x07,0x05,0x37,0x00,0x01,0x00,0xf8,0xeb,0xe2,0x07,0x20,0x5e,0x00,0xc4,0x0f,0x00,
0x08,0x09,0xa7,0x00,0x00,0x08,0xa1,0x59,0x08,0x05,0x37,0x40,0x01,0x00,0xf8,0xeb,
0x06,0x04,0x37,0xc0,0x00,0x00,0xf8,0xeb,0xf0,0x07,0x20,0x9e,0x00,0xc4,0x03,0x00,
0x03,0x03,0x87,0x01,0x10,0x06,0x20,0x36,0x07,0x04,0x37,0x00,0x01,0x00,0xf8,0xeb,
0x08,0x04,0x37,0x40,0x01,0x00,0xf8,0xeb,0xf1,0x00,0x20,0x1e,0x00,0xc8,0x03,0x00,
0x06,0x03,0x37,0xc0,0x00,0x00,0xf8,0xeb,0x07,0x03,0x37,0x00,0x01,0x00,0xf8,0xeb,
0x08,0x03,0x37,0x40,0x01,0x00,0xf8,0xeb,0xfd,0xff,0xa0,0xff,0x00,0x84,0x1f,0x00,
0x0f,0x00,0x80,0xdf,0xff,0x0f,0x40,0xe2,0x0f,0x00,0x07,0x00,0x00,0x00,0xf8,0xf0,
0x07,0x00,0x47,0x05,0x80,0x03,0x62,0x4b,0xff,0x07,0xa0,0xff,0x00,0xc0,0x1f,0x00,
0x00,0x00,0x07,0x00,0x80,0x1b,0xa8,0xf0,0x0f,0x00,0x08,0x00,0x00,0x00,0x00,0xe3,
0x05,0x00,0x07,0x05,0x00,0x02,0x18,0x4c,0xf2,0x07,0x20,0xf6,0x00,0x98,0x1f,0x00,
0x00,0x00,0x80,0x11,0x00,0x00,0x90,0xe2,0x02,0x05,0x07,0x00,0x00,0x00,0xd4,0xee,
0x03,0x00,0x27,0x05,0x80,0x07,0x98,0x4c,0xe6,0x07,0x40,0xfc,0x00,0xc4,0x1f,0x04,
0x03,0x00,0x87,0x01,0x80,0x01,0x00,0x36,0x07,0x00,0x87,0x01,0x90,0x01,0x20,0x36,
0x02,0x07,0x07,0x00,0x00,0x00,0xdc,0xee,0xb2,0x07,0x20,0xfe,0x20,0xc4,0x1e,0x00,
0x06,0x05,0x47,0x00,0x00,0x00,0xd4,0xee,0x06,0x07,0x47,0x00,0x00,0x00,0xdc,0xee,
0x03,0x07,0x07,0x01,0x00,0x00,0xd4,0xee,0xf1,0x07,0x20,0xfe,0x00,0xc8,0x1e,0x00,
0x02,0x07,0xc7,0x00,0x00,0x00,0xd4,0xee,0x04,0x07,0x47,0x01,0x00,0x00,0xd4,0xee,
0x05,0x05,0x87,0x00,0x00,0x00,0xd4,0xee,0xf0,0x07,0xc1,0x1e,0x00,0x98,0x1f,0x00,
0x08,0x03,0x37,0x00,0x00,0x10,0x68,0x5c,0x05,0x07,0x87,0x00,0x00,0x00,0xdc,0xee,
0x06,0x02,0x27,0x00,0x00,0x04,0xa0,0x59,0xe6,0x07,0xa0,0xfd,0x00,0xf4,0x3f,0x00,
0x06,0x04,0x47,0x00,0x00,0x03,0xa0,0x59,0x07,0x06,0xf7,0x0f,0x80,0x83,0xb4,0x5b,
0x0f,0x00,0x00,0x03,0x00,0x00,0x40,0xe2,0xe1,0x07,0x20,0xfc,0x00,0x84,0x1f,0x00,
0x02,0x00,0xf7,0x0f,0x80,0x07,0x98,0x5c,0x03,0x00,0xf7,0x0f,0x80,0x07,0x98,0x5c,
0x04,0x00,0xf7,0x0f,0x80,0x07,0x98,0x5c,0xfd,0x07,0xa0,0xe3,0x00,0x84,0x3f,0x00,
0x0f,0x00,0x07,0x00,0x00,0x00,0xf8,0xf0,0x05,0x06,0x57,0x00,0x00,0x00,0x80,0x50,
0x02,0x02,0x57,0x00,0x00,0x10,0x68,0x5c,0xe1,0x07,0x00,0xfe,0x00,0xf4,0x1f,0x00,
0x03,0x03,0x57,0x00,0x00,0x10,0x68,0x5c,0x04,0x04,0x57,0x00,0x00,0x10,0x68,0x5c,
0x0f,0x00,0x07,0x00,0x00,0x00,0xf8,0xf0,0xf0,0x07,0x20,0x1e,0x00,0xc4,0x07,0x00,
0x00,0x00,0x27,0x00,0x00,0x00,0x10,0x4c,0x02,0x07,0xc7,0x00,0x00,0x00,0xdc,0xee,
0x03,0x07,0x07,0x01,0x00,0x00,0xdc,0xee,0xf4,0x02,0xa0,0xfd,0x00,0xf4,0xff,0x00,
0x04,0x07,0x47,0x01,0x00,0x00,0xdc,0xee,0x07,0x00,0x47,0x05,0x80,0x03,0x62,0x4b,
0x0f,0x00,0x80,0xe9,0xff,0x0f,0x40,0xe2,0xff,0x07,0xe0,0xff,0x00,0x80,0x1f,0x00,
0x0f,0x00,0x07,0x00,0x00,0x00,0x00,0xe3,0x0f,0x00,0x87,0xff,0xff,0x0f,0x40,0xe2,
0x00,0x0f,0x07,0x00,0x00,0x00,0xb0,0x50,0xe0,0x07,0x00,0xfc,0x00,0x80,0x1f,0x00,
0x00,0x0f,0x07,0x00,0x00,0x00,0xb0,0x50,0x00,0x0f,0x07,0x00,0x00,0x00,0xb0,0x50,
0x00,0x0f,0x07,0x00,0x00,0x00,0xb0,0x50,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x34,0x00,0x00,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0x00,0x00,0x03,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4,0x00,0x00,0x00,0xbc,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x13,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x90,0x01,0x00,0x00,0x40,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x02,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x70,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x01,0x00,0x00,0x7c,0x00,0x00,0x00,
0x03,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x74,0x00,0x00,0x00,0x00,0x00,0x00,0x70,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x4c,0x02,0x00,0x00,0x18,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7d,0x00,0x00,0x00,0x01,0x00,0x00,0x00,
0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x64,0x02,0x00,0x00,0x54,0x01,0x00,0x00,
0x00,0x00,0x00,0x00,0x07,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x1b,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x06,0x00,0x10,0x00,0x00,0x00,0x00,0x00,
0xc0,0x03,0x00,0x00,0xc0,0x04,0x00,0x00,0x03,0x00,0x00,0x00,0x03,0x00,0x00,0x16,
0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0x00,0x00,0xc0,0x09,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x60,0x00,0x00,0x00,0x60,0x00,0x00,0x00,
0x05,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x64,0x02,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x06,0x00,0x00,0x14,0x06,0x00,0x00,
0x05,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x06,0x00,0x00,0x00,0x04,0x00,0x00,0x00
};
#ifdef __cplusplus
}
#endif
|
the_stack
|
namespace facebook { namespace deeplearning { namespace torch {
namespace detail {
namespace {
#define DIVUP(a, b) (((a) + (b) - 1) / (b))
const int SHARED_MEM_MAX_SIZE = 49152;
const int MV_N_REDUCE = 8;
const int MV_BUFFER_SIZE = 128;
// assume : bias is contiguous
// score last dim is contiguous
// score has enough elements
// score is initially filled with zeros
// shared memory buffer size == input_size * blockDim.y
// weight last dim is contiguous
// input last dim is contiguous
// mapping is contiguous
// blockIdx.x : MV line (a row of the target cluster's weight matrix)
// blockIdx.y : minibatch
// threadIdx.x : MV column (position along the input vector)
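// In effect, for each class line j of the target cluster this kernel computes
// score[j] = dot(input, weight[iclass_start + j]) + bias[iclass_start + j];
// lines are distributed over blocks along x, and the MV_BUFFER_SIZE threads of
// a block cooperate on one dot product at a time.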
__global__ void
updateOutputWithTargetMV(const float* input,
const float* weight,
const float* bias,
const float* mapping,
const float* n_class_in_cluster,
const float* class_start_indices,
const float* target,
const long input_stride0,
const long weight_stride0,
const long score_stride0,
long input_size,
float* score) {
__shared__ float buffer[MV_BUFFER_SIZE];
// align input and score to current sample in minibatch
input += input_stride0 * blockIdx.y;
score += score_stride0 * blockIdx.y;
// get the indices corresponding to the target
const int itarget = (int)(target[blockIdx.y] - 0.5f); // - 0.5 : 1based->0
const int cluster_target = (int)(mapping[2*itarget] - 0.5f);
const int iclass_start = (int)(class_start_indices[cluster_target] + 0.5f);
const int cluster_size = (int)(n_class_in_cluster[cluster_target] + 0.5f);
// get the bias and weight of the target cluster + correct line
const int lineIdx = blockIdx.x;
const int nLinesParallel = gridDim.x;
// do matrix vector multiply :
const int tidxx = threadIdx.x;
// loop over lines
for (int iline = lineIdx; iline < cluster_size; iline += nLinesParallel) {
const float* weight0 = weight + weight_stride0 * (iclass_start + iline);
// map
__syncthreads();
register float tmp = 0.f;
for (int i = tidxx; i < input_size; i += MV_BUFFER_SIZE)
tmp += input[i] * weight0[i];
buffer[tidxx] = tmp;
// reduce
/*
for (unsigned int stride = MV_BUFFER_SIZE >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (tidxx < stride)
buffer[tidxx] += buffer[tidxx+stride];
}
if (tidxx == 0)
score[iline] = buffer[0] + bias[iclass_start + iline];
*/
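// The live code below replaces the commented-out log2(MV_BUFFER_SIZE)-step tree
// reduction with a two-stage scheme: the 128 partial sums are first collapsed to
// MV_BUFFER_SIZE / MV_N_REDUCE = 16 values (each of the first 16 threads sums 8
// consecutive entries), then thread 0 serially adds the remaining 16 values.
// This trades fewer __syncthreads() calls for a short serial tail on thread 0.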
tmp = 0.f;
__syncthreads();
if (tidxx < MV_BUFFER_SIZE / MV_N_REDUCE) {
for (int i = tidxx * MV_N_REDUCE; i < (tidxx + 1) * MV_N_REDUCE; ++i)
tmp += buffer[i];
buffer[tidxx] = tmp;
}
__syncthreads();
// store result
if (tidxx == 0) {
tmp = buffer[0];
#pragma unroll
for (int i = 1; i < MV_BUFFER_SIZE / MV_N_REDUCE; ++i)
tmp += buffer[i];
score[iline] = tmp + bias[iclass_start + iline];
}
}
}
const int LSM_BUFFER_SIZE = 128;
// assume :
// mapping is contiguous
// logsum is contiguous
// score is contiguous in the last dim
// blockIdx.x : minibatch
// blockIdx.y : cluster/class
// threadIdx.x : worker
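// Each (minibatch, y) block performs a numerically stable log-sum-exp over either
// the cluster scores (blockIdx.y == 0) or the class scores of the target cluster
// (blockIdx.y == 1): logsum = max_k + log(sum_i exp(score[i] - max_k)).
// Thread 0 then accumulates (logsum - target_score) into the loss, so the two
// y-blocks together add this sample's hierarchical softmax negative log-likelihood.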
__global__ void
updateOutputWithTargetLSM(const float* target,
const float* mapping,
const float* n_class_in_cluster,
const float* class_score,
float* class_logsum,
float* cluster_score,
float* cluster_logsum,
const long class_score_stride0,
const long cluster_score_stride0,
int n_clusters,
float* loss) {
__shared__ float buffer[LSM_BUFFER_SIZE + 1];
const int tidx = threadIdx.x;
const int nthreads = blockDim.x;
const int itarget = (int)(target[blockIdx.x] - 0.5f);
const int cluster_target = (int)(mapping[2*itarget] - 0.5f);
const int idx_in_cluster_target = (int)(mapping[2*itarget+1] - 0.5f);
const int cluster_size = (int)(n_class_in_cluster[cluster_target] + 0.5f);
const float *score;
float *logsum, target_score;
int N;
if (blockIdx.y == 0) {
score = cluster_score + blockIdx.x * cluster_score_stride0;
logsum = cluster_logsum + blockIdx.x;
N = n_clusters;
target_score = score[cluster_target];
} else {
score = class_score + blockIdx.x * class_score_stride0;
logsum = class_logsum + blockIdx.x;
N = cluster_size;
target_score = score[idx_in_cluster_target];
}
// get max (from nn.LogSoftMax code)
// map
float vmax = -FLT_MAX;
for (int i = tidx; i < N; i += nthreads) {
float z = score[i];
if (vmax < z)
vmax = z;
}
buffer[tidx] = vmax;
// reduce
for (unsigned int stride = nthreads >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((tidx < stride) && (buffer[tidx] < buffer[tidx+stride]))
buffer[tidx] = buffer[tidx+stride];
}
// store it at last position in buffer
if (tidx == 0) {
float max_k = -FLT_MAX;
if (max_k < buffer[0])
max_k = buffer[0];
buffer[LSM_BUFFER_SIZE] = max_k;
}
__syncthreads();
// logadd
// map
float max_k = buffer[LSM_BUFFER_SIZE];
buffer[tidx] = 0;
for (int i = tidx; i < N; i += nthreads)
buffer[tidx] += expf(score[i] - max_k);
// reduce
for (unsigned int stride = nthreads >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (tidx < stride)
buffer[tidx] += buffer[tidx+stride];
}
// write result
if (tidx == 0) {
float logsum_k = max_k + logf(buffer[0]);
*logsum = logsum_k;
atomicAdd(loss, logsum_k - target_score);
}
}
// assume :
// mapping is contiguous
// logsum is contiguous
// score is contiguous in the last dim
// blockIdx.x : minibatch
// blockIdx.y : cluster/class
// threadIdx.x : worker
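// Backward of the log-softmax above: each score is overwritten in place with
// softmax(score)_i = exp(score_i - logsum), and the target entry (cluster for
// blockIdx.y == 0, class-within-cluster otherwise) gets an extra -1, i.e.
// dL/dscore_i = softmax(score)_i - [i == target].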
__global__ void
updateGradInputLSM(const float* target,
const float* mapping,
const float* n_class_in_cluster,
float* class_score,
float* class_logsum,
float* cluster_score,
float* cluster_logsum,
const long class_score_stride0,
const long cluster_score_stride0,
int n_clusters) {
const int tidx = threadIdx.x;
const int nthreads = blockDim.x;
const int itarget = (int)(target[blockIdx.x] - 0.5f);
const int cluster_target = (int)(mapping[2*itarget] - 0.5f);
const int idx_in_cluster_target = (int)(mapping[2*itarget+1] - 0.5f);
const int cluster_size = (int)(n_class_in_cluster[cluster_target] + 0.5f);
float *score, logsum_k, *target_score;
int N;
if (blockIdx.y == 0) {
score = cluster_score + blockIdx.x * cluster_score_stride0;
logsum_k = cluster_logsum[blockIdx.x];
N = n_clusters;
target_score = score + cluster_target;
} else {
score = class_score + blockIdx.x * class_score_stride0;
logsum_k = class_logsum[blockIdx.x];
N = cluster_size;
target_score = score + idx_in_cluster_target;
}
for (int i = tidx; i < N; i += nthreads)
score[i] = expf(score[i] - logsum_k);
__syncthreads(); // TODO: this barrier is probably not strictly needed
if (tidx == 0)
*target_score -= 1.f;
}
const int MV2_NLINES = 128;
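// Back-propagates the class-score gradient through the target cluster's weights:
// gradInput[col] = sum_i score[i] * weight[iclass_start + i][col], i.e. a
// W^T * gradScore product restricted to the target cluster's rows. Each thread
// owns one input column, strided by gridDim.x * MV2_NLINES.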
__global__ void
updateGradInputMV(const float* score,
const float* weight,
const float* mapping,
const float* n_class_in_cluster,
const float* class_start_indices,
const float* target,
const long gradInput_stride0,
const long weight_stride0,
const long score_stride0,
int input_size,
float* gradInput) {
// align input and score to current sample in minibatch
gradInput += gradInput_stride0 * blockIdx.y;
score += score_stride0 * blockIdx.y;
// get the indices corresponding to the target
const int itarget = (int)(target[blockIdx.y] - 0.5f); // - 0.5 : 1based->0
const int cluster_target = (int)(mapping[2*itarget] - 0.5f);
const int iclass_start = (int)(class_start_indices[cluster_target] + 0.5f);
const int cluster_size = (int)(n_class_in_cluster[cluster_target] + 0.5f);
// select the weight columns of the target cluster handled by this thread
const int colIdx = blockIdx.x * MV2_NLINES + threadIdx.x;
const int nColParallel = gridDim.x * MV2_NLINES;
// loop over lines
weight += weight_stride0 * iclass_start;
for (int icol = colIdx; icol < input_size; icol += nColParallel) {
const float* weight0 = weight + icol;
// map
register float tmp = 0.f;
for (int i = 0; i < cluster_size; ++i)
tmp += score[i] * weight0[weight_stride0 * i];
gradInput[icol] = tmp;
}
}
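// Accumulates the rank-1 parameter update for one sample:
// gradWeight[row] += scale * score[row] * input and gradBias[row] += scale * score[row],
// over the rows of the target cluster. The input vector and this block's slice of
// the score are staged in shared memory, and atomicAdd is used because several
// minibatch blocks may touch the same cluster's rows concurrently.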
__global__ void
accGradParameters(const float* mapping,
const float* n_class_in_cluster,
const float* class_start_indices,
const float* target,
const float* input,
const float* score,
const int input_size,
const long input_stride0,
const long score_stride0,
const long gradWeight_stride0,
const float scale,
float* gradWeight,
float* gradBias) {
// select minibatch
input += blockIdx.x * input_stride0;
score += blockIdx.x * score_stride0;
const int itarget = (int)(target[blockIdx.x] - 0.5f); // - 0.5 : 1based->0
const int cluster_target = (int)(mapping[2*itarget] - 0.5f);
const int iclass_start = (int)(class_start_indices[cluster_target] + 0.5f);
const int cluster_size = (int)(n_class_in_cluster[cluster_target] + 0.5f);
gradWeight += iclass_start * gradWeight_stride0;
gradBias += iclass_start;
// fill shared memory
const int iline_stride = DIVUP(cluster_size, gridDim.y);
const int iline_start = blockIdx.y * iline_stride;
const int iline_end = min(iline_start + iline_stride, cluster_size);
const int iline_n = iline_end - iline_start;
const int tidx = threadIdx.y * blockDim.x + threadIdx.x;
const int nthreads = blockDim.x * blockDim.y;
extern __shared__ float shared_input[];
float* shared_score = shared_input + input_size;
for (int i = tidx; i < input_size; i += nthreads)
shared_input[i] = input[i];
for (int i = tidx; i < iline_n; i += nthreads)
shared_score[i] = score[iline_start + i];
gradBias += iline_start;
gradWeight += iline_start * gradWeight_stride0;
__syncthreads();
// outer product
for (int iline = threadIdx.y; iline < iline_n; iline += blockDim.y) {
float* gradWeight0 = gradWeight + iline * gradWeight_stride0;
register const float score_cur = scale * shared_score[iline];
for (int icol = threadIdx.x; icol < input_size; icol += blockDim.x)
atomicAdd(gradWeight0 + icol, score_cur * shared_input[icol]);
if (threadIdx.x == 0)
atomicAdd(gradBias + iline, score_cur);
}
}
} // namespace
void launchUpdateOutputWithTargetKernel(
cudaStream_t stream,
const float* input,
const float* class_weight,
const float* class_bias,
const float* mapping,
const float* n_class_in_cluster,
const float* class_start_indices,
const float* target,
const long* input_strides,
const long* class_weight_strides,
const long* class_score_strides,
const long* cluster_score_strides,
const long input_size,
const long minibatch_size,
const long n_max_class_per_cluster,
const long n_clusters,
float* class_score,
float* class_logsum,
float* cluster_score,
float* cluster_logsum,
float* output) {
{ // run MV
const long n_lines_on_grid = 64; //TODO: tune
dim3 threads(MV_BUFFER_SIZE);
dim3 blocks(min((int)n_lines_on_grid, (int)n_max_class_per_cluster),
minibatch_size);
updateOutputWithTargetMV<<<blocks, threads, 0, stream>>>(
input, class_weight, class_bias, mapping, n_class_in_cluster,
class_start_indices, target, input_strides[0], class_weight_strides[0],
class_score_strides[0], input_size, class_score);
}
{ // run logsoftmax
dim3 blocks(minibatch_size, 2);
dim3 threads(LSM_BUFFER_SIZE);
updateOutputWithTargetLSM<<<blocks, threads, 0, stream>>>(
target, mapping, n_class_in_cluster, class_score, class_logsum,
cluster_score, cluster_logsum, class_score_strides[0],
cluster_score_strides[0], n_clusters, output);
}
}
void launchUpdateGradInput(
cudaStream_t stream,
const float* class_weight,
const float* mapping,
const float* n_class_in_cluster,
const float* class_start_indices,
const float* target,
const long* gradInput_strides,
const long* class_weight_strides,
const long* class_score_strides,
const long* cluster_score_strides,
const long input_size,
const long minibatch_size,
const long n_max_class_per_cluster,
const long n_clusters,
float* class_score,
float* class_logsum,
float* cluster_score,
float* cluster_logsum,
float* gradInput) {
cudaMemsetAsync(gradInput, 0, input_size * minibatch_size * sizeof(float), stream);
{ // bprop in logsoftmax
dim3 blocks(minibatch_size, 2);
dim3 threads(128); //TODO: tune
updateGradInputLSM<<<blocks, threads, 0, stream>>>(
target, mapping, n_class_in_cluster, class_score, class_logsum,
cluster_score, cluster_logsum, class_score_strides[0],
cluster_score_strides[0], n_clusters);
}
{
dim3 threads(MV2_NLINES);
dim3 blocks(32, minibatch_size); //TODO: tune
updateGradInputMV<<<blocks, threads, 0, stream>>>(
class_score, class_weight, mapping, n_class_in_cluster,
class_start_indices, target, gradInput_strides[0],
class_weight_strides[0], class_score_strides[0],
input_size, gradInput);
}
}
void launchAccGradParameters(
cudaStream_t stream,
const float* class_score,
const float* mapping,
const float* n_class_in_cluster,
const float* class_start_indices,
const float* target,
const float* input,
const long* input_strides,
const long* class_score_strides,
const long* class_gradWeight_strides,
const long input_size,
const long minibatch_size,
const long n_max_class_per_cluster,
const float scale,
float* class_gradWeight,
float* class_gradBias) {
dim3 blocks(minibatch_size, 4); //TODO: tune
dim3 threads(32, 4);
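// Dynamic shared memory holds one input row (input_size floats) plus this block's
// slice of the scores (up to DIVUP(n_max_class_per_cluster, blocks.y) floats), matching
// the shared_input/shared_score buffers used inside accGradParameters.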
const size_t shared_mem_size =
(input_size + DIVUP(n_max_class_per_cluster, blocks.y)) * sizeof(float);
if (shared_mem_size > SHARED_MEM_MAX_SIZE) {
printf("HSM: not enough shared memory. Reduce input size and/or number \
of class in the largest cluster\n");
exit(0);
}
accGradParameters<<<blocks, threads, shared_mem_size, stream>>>(
mapping, n_class_in_cluster, class_start_indices, target, input,
class_score, input_size, input_strides[0], class_score_strides[0],
class_gradWeight_strides[0], scale, class_gradWeight,
class_gradBias);
}
}}}}
|
the_stack
|
#define DETERMINISTIC
//#define DEBUG
//#define DYNAMIC
#define NUM_NODE_SIZES 64 ///< number of different cell sizes
#include "utility/src/utils.cuh"
// database dependency
#include "utility/src/detailed_place_db.cuh"
#include "independent_set_matching/src/construct_spaces.cuh"
#include "independent_set_matching/src/auction.cuh"
// This shared memory version is still buggy;
// it gets very slow on the ISPD 2018 test4/5/6 benchmarks and can cause illegal memory access
//#include "independent_set_matching/src/auction_shared_memory.cuh"
//#include "independent_set_matching/src/auction_cuda2cpu.cuh"
#include "independent_set_matching/src/maximal_independent_set.cuh"
//#include "independent_set_matching/src/maximal_independent_set_cuda2cpu.cuh"
#include "independent_set_matching/src/cpu_state.cuh"
#include "independent_set_matching/src/collect_independent_sets.cuh"
//#include "independent_set_matching/src/collect_independent_sets_cuda2cpu.cuh"
#include "independent_set_matching/src/cost_matrix_construction.cuh"
//#include "independent_set_matching/src/cost_matrix_construction_cuda2cpu.cuh"
#include "independent_set_matching/src/apply_solution.cuh"
//#include "independent_set_matching/src/apply_solution_cuda2cpu.h"
#include "independent_set_matching/src/shuffle.cuh"
DREAMPLACE_BEGIN_NAMESPACE
struct SizedBinIndex
{
int size_id;
int bin_id;
};
template <typename T>
struct IndependentSetMatchingState
{
typedef T type;
typedef int cost_type;
int* ordered_nodes = nullptr;
////int* node_size_id = nullptr;
Space<T>* spaces = nullptr; ///< array of cell spaces, each cell only consider the space on its left side except for the left and right boundary
int num_node_sizes; ///< number of cell sizes considered
int* independent_sets = nullptr; ///< independent sets, length of batch_size*set_size
int* independent_set_sizes = nullptr; ///< size of each independent set
////int* ordered_independent_sets = nullptr; ///< temporary storage for reordering independent sets, forward mapping
////int* reordered_independent_sets = nullptr; ///< temporary storage for reordering independent sets, reverse mapping
int* selected_maximal_independent_set = nullptr; ///< storing the selected maximum independent set
int* select_scratch = nullptr; ///< temporary storage for selection kernel
int num_selected; ///< maximum independent set size
int* device_num_selected; ///< maximum independent set size
////int* device_num_selected_prefix_sum = nullptr; ///< prefix sum for different sizes of cells in the maximum independent set
//int* device_num_clusters_prefix_sum = nullptr; ///< prefix sum of the number of clusters for different cell sizes
////int* node2center = nullptr; ///< map cell to cluster center for kmeans
//int* centers = nullptr; ///< batch_size, cells for centers
////T* center_xs = nullptr; ///< NUM_NODE_SIZES*batch_size, cluster centers of different sizes
////T* center_ys = nullptr; ///< NUM_NODE_SIZES*batch_size, cluster centers of different sizes
//int* cluster_sizes = nullptr; ///< NUM_NODE_SIZES*batch_size, cluster sizes of different cell sizes
double* net_hpwls; ///< HPWL for each net, use integer to get consistent values
int* selected_markers = nullptr; ///< must be int for cub to compute prefix sum
unsigned char* dependent_markers = nullptr;
int* independent_set_empty_flag = nullptr; ///< a stopping flag for maximum independent set
////int* device_num_independent_sets = nullptr; ///< actual number of independent sets
int num_independent_sets; ///< host copy
cost_type* cost_matrices = nullptr; ///< cost matrices batch_size*set_size*set_size
cost_type* cost_matrices_copy = nullptr; ///< temporary copy of cost matrices
int* solutions = nullptr; ///< batch_size*set_size
char* auction_scratch = nullptr; ///< temporary memory for auction solver
char* stop_flags = nullptr; ///< record stopping status from auction solver
T* orig_x = nullptr; ///< original locations of cells for applying solutions
T* orig_y = nullptr;
cost_type* orig_costs = nullptr; ///< original costs
cost_type* solution_costs = nullptr; ///< solution costs
Space<T>* orig_spaces = nullptr; ///< original spaces of cells for apply solutions
int batch_size; ///< pre-allocated number of independent sets
int set_size;
int cost_matrix_size; ///< set_size*set_size
int num_bins; ///< num_bins_x*num_bins_y
int* device_num_moved; ///< device copy
int num_moved; ///< host copy, number of moved cells
int large_number; ///< a large number
float auction_max_eps; ///< maximum epsilon for auction solver
float auction_min_eps; ///< minimum epsilon for auction solver
float auction_factor; ///< decay factor for auction epsilon
int auction_max_iterations; ///< maximum iteration
T skip_threshold; ///< ignore connections if cells are far apart
};
/// @brief A debug helper. Dump binary device data to a file.
template <typename T>
void write(const T* device_data, size_t size, std::string filename)
{
std::ofstream out (filename.c_str(), std::ios::out | std::ios::binary);
dreamplaceAssert(out.good());
dreamplacePrint(kDEBUG, "write to %s size %llu\n", filename.c_str(), size);
std::vector<T> host_data (size);
checkCUDA(cudaMemcpy(host_data.data(), device_data, sizeof(T)*size, cudaMemcpyDeviceToHost));
checkCUDA(cudaDeviceSynchronize());
out.write((char*)&size, sizeof(int));
out.write((char*)host_data.data(), sizeof(T)*host_data.size());
out.close();
}
/// @brief The corresponding routine that reads the binary data back.
template <typename T>
void read(std::vector<T>& data, const char* filename)
{
std::ifstream in (filename, std::ios::in | std::ios::binary);
assert(in.good());
int size = 0;
in.read((char*)&size, sizeof(size));
data.resize(size);
in.read((char*)data.data(), sizeof(T)*size);
in.close();
}
__global__ void cost_matrix_init(int* cost_matrix, int set_size)
{
for (int i = blockIdx.x; i < set_size; i += gridDim.x)
{
for (int j = threadIdx.x; j < set_size; j += blockDim.x)
{
cost_matrix[i*set_size+j] = (i == j)? 0 : cuda::numeric_limits<int>::max();
}
}
}
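// Illustrative sketch only: a host-side helper showing how cost_matrix_init could be
// applied to a batch of contiguous set_size x set_size matrices (one per independent set).
// It is not referenced elsewhere in this file, and the launch configuration below is an
// assumption rather than the one used by cost_matrix_construction().
inline void cost_matrix_init_batch(int* cost_matrices, int num_matrices, int set_size,
cudaStream_t stream = 0)
{
int num_blocks = set_size < 256 ? set_size : 256;
int num_threads = set_size < 256 ? set_size : 256;
for (int k = 0; k < num_matrices; ++k)
{
// each matrix starts at offset k*set_size*set_size in the contiguous batch
cost_matrix_init<<<num_blocks, num_threads, 0, stream>>>(
cost_matrices + static_cast<size_t>(k) * set_size * set_size, set_size);
}
}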
template <typename T>
__global__ void print_global(T* a, int n)
{
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
if (tid == 0 && bid == 0)
{
printf("[%d]\n", n);
for (int i = 0; i < n; ++i)
{
printf("%g ", (double)a[i]);
}
printf("\n");
}
}
template <typename T>
__global__ void print_cost_matrix(const T* cost_matrix, int set_size, bool major)
{
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
if (tid == 0 && bid == 0)
{
printf("[%dx%d]\n", set_size, set_size);
for (int r = 0; r < set_size; ++r)
{
for (int c = 0; c < set_size; ++c)
{
if (major) // column major
{
printf("%g ", (double)cost_matrix[c*set_size+r]);
}
else
{
printf("%g ", (double)cost_matrix[r*set_size+c]);
}
}
printf("\n");
}
printf("\n");
}
}
template <typename T>
__global__ void print_solution(const T* solution, int n)
{
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
if (tid == 0 && bid == 0)
{
printf("[%d]\n", n);
for (int i = 0; i < n; ++i)
{
printf("%g ", (double)solution[i]);
}
printf("\n");
}
}
template <typename T>
int independentSetMatchingCUDALauncher(DetailedPlaceDB<T> db,
int batch_size, int set_size, int max_iters, int num_threads)
{
//size_t printf_size = 0;
//cudaDeviceGetLimit(&printf_size,cudaLimitPrintfFifoSize);
//cudaDeviceSetLimit(cudaLimitPrintfFifoSize, printf_size*10);
// fix random seed
std::srand(1000);
//const double threshold = 0.0001;
CPUTimer::hr_clock_rep timer_start, timer_stop;
CPUTimer::hr_clock_rep kernel_timer_start, kernel_timer_stop;
CPUTimer::hr_clock_rep total_timer_start, total_timer_stop;
total_timer_start = CPUTimer::getGlobaltime();
IndependentSetMatchingState<T> state;
// initialize host database
DetailedPlaceCPUDB<T> host_db;
init_cpu_db(db, host_db);
state.batch_size = batch_size;
state.set_size = set_size;
state.cost_matrix_size = state.set_size*state.set_size;
state.num_bins = db.num_bins_x*db.num_bins_y;
state.num_moved = 0;
state.large_number = ((db.xh-db.xl) + (db.yh-db.yl))*set_size;
state.skip_threshold = ((db.xh-db.xl) + (db.yh-db.yl))*0.01;
state.auction_max_eps = 10.0;
state.auction_min_eps = 1.0;
state.auction_factor = 0.1;
state.auction_max_iterations = 9999;
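// Auction epsilon-scaling parameters: the assumption here is that linear_assignment_auction
// starts at auction_max_eps and multiplies epsilon by auction_factor each scaling round until
// it reaches auction_min_eps, capping work at auction_max_iterations; the exact schedule is
// defined in auction.cuh, not in this file.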
////timer_start = CPUTimer::getGlobaltime();
////std::map<int, int> size2num_node_map; ///< number of cells with different sizes
////std::map<int, int> size2id_map; ///< map width of a cell to an index
////std::vector<T> host_x (db.num_nodes);
////std::vector<T> host_y (db.num_nodes);
checkCUDA(cudaMemcpy(host_db.x.data(), db.x, sizeof(T)*db.num_nodes, cudaMemcpyDeviceToHost));
checkCUDA(cudaMemcpy(host_db.y.data(), db.y, sizeof(T)*db.num_nodes, cudaMemcpyDeviceToHost));
/////std::vector<T> host_node_size_x (db.num_nodes);
/////std::vector<T> host_node_size_y (db.num_nodes);
/////checkCUDA(cudaMemcpy(host_node_size_x.data(), db.node_size_x, sizeof(T)*db.num_nodes, cudaMemcpyDeviceToHost));
/////checkCUDA(cudaMemcpy(host_node_size_y.data(), db.node_size_y, sizeof(T)*db.num_nodes, cudaMemcpyDeviceToHost));
////std::vector<int> host_node_size_id (db.num_movable_nodes, std::numeric_limits<int>::max());
////std::vector<SizedBinIndex> host_thread2bin_map;
////std::vector<int> host_ordered_nodes (db.num_movable_nodes);
////std::iota(host_ordered_nodes.begin(), host_ordered_nodes.end(), 0);
std::vector<Space<T> > host_spaces (db.num_movable_nodes);
construct_spaces(db, host_db.x.data(), host_db.y.data(), host_db.node_size_x.data(), host_db.node_size_y.data(), host_spaces, num_threads);
////// initialize size information
////{
//// for (int i = 0; i < db.num_movable_nodes; ++i)
//// {
//// if (host_node_size_y[i] == db.row_height)
//// {
//// int width = (int)ceil(host_node_size_x[i]/db.site_width);
//// if (size2num_node_map.count(width))
//// {
//// size2num_node_map[width] += 1;
//// }
//// else
//// {
//// size2num_node_map[width] = 1;
//// }
//// }
//// }
//// int size_id = 0;
//// for (auto kv : size2num_node_map)
//// {
//// if (kv.second < state.set_size || size_id >= NUM_NODE_SIZES)
//// {
//// dreamplacePrint(kINFO, "ignore %d cells of width %d\n", kv.second, kv.first);
//// continue;
//// }
//// size2id_map[kv.first] = size_id;
//// dreamplacePrint(kINFO, "map %d cells of width %d to %d\n", kv.second, kv.first, size_id);
//// ++size_id;
//// }
//// state.num_node_sizes = size_id;
//// dreamplacePrint(kINFO, "consider %d kinds of cell sizes\n", state.num_node_sizes);
//// for (int i = 0; i < db.num_movable_nodes; ++i)
//// {
//// if (host_node_size_y[i] == db.row_height)
//// {
//// int width = (int)ceil(host_node_size_x[i]/db.site_width);
//// if (size2id_map.count(width))
//// {
//// int size_id = size2id_map.at(width);
//// host_node_size_id[i] = size_id;
//// }
//// }
//// }
////}
////timer_stop = CPUTimer::getGlobaltime();
////dreamplacePrint(kINFO, "initializing cell size categories takes %g ms\n", CPUTimer::getTimerPeriod()*(timer_stop-timer_start));
// initialize cuda state
timer_start = CPUTimer::getGlobaltime();
{
////allocateCopyCUDA(state.node_size_id, host_node_size_id.data(), db.num_movable_nodes);
allocateCopyCUDA(state.spaces, host_spaces.data(), db.num_movable_nodes);
allocateCUDA(state.ordered_nodes, db.num_movable_nodes, int);
iota<<<ceilDiv(db.num_movable_nodes, 512), 512>>>(state.ordered_nodes, db.num_movable_nodes);
allocateCUDA(state.independent_sets, state.batch_size*state.set_size, int);
allocateCUDA(state.independent_set_sizes, state.batch_size, int);
////allocateCUDA(state.ordered_independent_sets, state.batch_size, int);
////allocateCUDA(state.reordered_independent_sets, state.batch_size, int);
allocateCUDA(state.selected_maximal_independent_set, db.num_movable_nodes, int);
allocateCUDA(state.select_scratch, db.num_movable_nodes, int);
allocateCUDA(state.device_num_selected, 1, int);
////allocateCUDA(state.device_num_selected_prefix_sum, NUM_NODE_SIZES+1, int);
//allocateCUDA(state.device_num_clusters_prefix_sum, NUM_NODE_SIZES+1, int);
////allocateCUDA(state.node2center, db.num_movable_nodes, int);
//allocateCUDA(state.centers, state.batch_size, int);
////allocateCUDA(state.center_xs, state.batch_size*NUM_NODE_SIZES, T);
////allocateCUDA(state.center_ys, state.batch_size*NUM_NODE_SIZES, T);
//allocateCUDA(state.cluster_sizes, state.batch_size*NUM_NODE_SIZES, T);
////allocateCUDA(state.device_num_independent_sets, 1, int);
allocateCUDA(state.orig_x, state.batch_size*state.set_size, T);
allocateCUDA(state.orig_y, state.batch_size*state.set_size, T);
allocateCUDA(state.orig_spaces, state.batch_size*state.set_size, Space<T>);
allocateCUDA(state.selected_markers, db.num_nodes, int);
allocateCUDA(state.dependent_markers, db.num_nodes, unsigned char);
allocateCUDA(state.independent_set_empty_flag, 1, int);
allocateCUDA(state.cost_matrices, state.batch_size*state.set_size*state.set_size, typename IndependentSetMatchingState<T>::cost_type);
allocateCUDA(state.cost_matrices_copy, state.batch_size*state.set_size*state.set_size, typename IndependentSetMatchingState<T>::cost_type);
allocateCUDA(state.solutions, state.batch_size*state.set_size, int);
allocateCUDA(state.orig_costs, state.batch_size*state.set_size, typename IndependentSetMatchingState<T>::cost_type);
allocateCUDA(state.solution_costs, state.batch_size*state.set_size, typename IndependentSetMatchingState<T>::cost_type);
allocateCUDA(state.net_hpwls, db.num_nets, typename std::remove_pointer<decltype(state.net_hpwls)>::type);
allocateCopyCUDA(state.device_num_moved, &state.num_moved, 1);
init_auction<T>(state.batch_size, state.set_size, state.auction_scratch, state.stop_flags);
}
Shuffler<int, unsigned int> shuffler (1234ULL, state.ordered_nodes, db.num_movable_nodes);
// initialize host state
IndependentSetMatchingCPUState<T> host_state;
init_cpu_state(db, state, host_state);
// initialize kmeans state
KMeansState<T> kmeans_state;
init_kmeans(db, state, kmeans_state);
timer_stop = CPUTimer::getGlobaltime();
dreamplacePrint(kINFO, "initializing GPU memory takes %g ms\n", CPUTimer::getTimerPeriod()*(timer_stop-timer_start));
kernel_timer_start = CPUTimer::getGlobaltime();
// runtime profiling
CPUTimer::hr_clock_rep iter_timer_start, iter_timer_stop;
int random_shuffle_runs = 0, maximal_independent_set_runs = 0, collect_independent_sets_runs = 0,
cost_matrix_construction_runs = 0, independent_sets_solving_runs = 0, apply_solution_runs = 0;
CPUTimer::hr_clock_rep random_shuffle_time = 0, maximal_independent_set_time = 0, collect_independent_sets_time = 0,
cost_matrix_construction_time = 0, independent_sets_solving_time = 0, apply_solution_time = 0;
std::vector<T> hpwls (max_iters+1);
hpwls[0] = compute_total_hpwl(db, db.x, db.y, state.net_hpwls);
dreamplacePrint(kINFO, "initial hpwl %g\n", hpwls[0]);
for (int iter = 0; iter < max_iters; ++iter)
{
iter_timer_start = CPUTimer::getGlobaltime();
timer_start = CPUTimer::getGlobaltime();
//std::random_shuffle(host_ordered_nodes.begin(), host_ordered_nodes.end());
//checkCUDA(cudaMemcpy(state.ordered_nodes, host_ordered_nodes.data(), sizeof(int)*db.num_movable_nodes, cudaMemcpyHostToDevice));
shuffler();
checkCUDA(cudaDeviceSynchronize());
timer_stop = CPUTimer::getGlobaltime();
random_shuffle_time += timer_stop-timer_start;
random_shuffle_runs += 1;
timer_start = CPUTimer::getGlobaltime();
maximal_independent_set(db, state);
checkCUDA(cudaDeviceSynchronize());
timer_stop = CPUTimer::getGlobaltime();
maximal_independent_set_time += timer_stop-timer_start;
maximal_independent_set_runs += 1;
timer_start = CPUTimer::getGlobaltime();
collect_independent_sets(db, state, kmeans_state, host_db, host_state);
//collect_independent_sets_cuda2cpu(db, state);
// better to copy here, because state is passed by value;
// copying into it inside a callee would have no effect on this copy
////checkCUDA(cudaMemcpy(&state.num_independent_sets, state.device_num_independent_sets, sizeof(int), cudaMemcpyDeviceToHost));
checkCUDA(cudaDeviceSynchronize());
timer_stop = CPUTimer::getGlobaltime();
collect_independent_sets_time += timer_stop-timer_start;
collect_independent_sets_runs += 1;
timer_start = CPUTimer::getGlobaltime();
cost_matrix_construction(db, state);
checkCUDA(cudaDeviceSynchronize());
timer_stop = CPUTimer::getGlobaltime();
cost_matrix_construction_time += timer_stop-timer_start;
cost_matrix_construction_runs += 1;
// solve independent sets
//state.num_independent_sets = 4;
//print_cost_matrix<<<1, 1>>>(state.cost_matrices + state.cost_matrix_size*3, state.set_size, 0);
timer_start = CPUTimer::getGlobaltime();
linear_assignment_auction(
state.cost_matrices,
state.solutions,
state.num_independent_sets,
state.set_size,
state.auction_scratch,
state.stop_flags,
state.auction_max_eps,
state.auction_min_eps,
state.auction_factor,
state.auction_max_iterations
);
checkCUDA(cudaDeviceSynchronize());
timer_stop = CPUTimer::getGlobaltime();
independent_sets_solving_time += timer_stop-timer_start;
independent_sets_solving_runs += 1;
//print_solution<<<1, 1>>>(state.solutions + state.set_size*3, state.set_size);
// apply solutions
timer_start = CPUTimer::getGlobaltime();
apply_solution(db, state);
checkCUDA(cudaDeviceSynchronize());
timer_stop = CPUTimer::getGlobaltime();
apply_solution_time += timer_stop-timer_start;
apply_solution_runs += 1;
iter_timer_stop = CPUTimer::getGlobaltime();
hpwls[iter+1] = compute_total_hpwl(db, db.x, db.y, state.net_hpwls);
if ((iter%(max(max_iters/10, 1))) == 0 || iter+1 == max_iters)
{
dreamplacePrint(kINFO, "iteration %d, target hpwl %g, delta %g(%g%%), %d independent sets, moved %g%% cells, runtime %g ms\n",
iter,
hpwls[iter+1], hpwls[iter+1]-hpwls[0], (hpwls[iter+1]-hpwls[0])/hpwls[0]*100,
state.num_independent_sets,
state.num_moved/(double)db.num_movable_nodes*100,
CPUTimer::getTimerPeriod()*(iter_timer_stop-iter_timer_start)
);
}
}
kernel_timer_stop = CPUTimer::getGlobaltime();
dreamplacePrint(kDEBUG, "random_shuffle takes %g ms, %d runs, average %g ms\n",
CPUTimer::getTimerPeriod()*random_shuffle_time, random_shuffle_runs, CPUTimer::getTimerPeriod()*random_shuffle_time/random_shuffle_runs);
dreamplacePrint(kDEBUG, "maximal_independent_set takes %g ms, %d runs, average %g ms\n",
CPUTimer::getTimerPeriod()*maximal_independent_set_time, maximal_independent_set_runs, CPUTimer::getTimerPeriod()*maximal_independent_set_time/maximal_independent_set_runs);
dreamplacePrint(kDEBUG, "collect_independent_sets takes %g ms, %d runs, average %g ms\n",
CPUTimer::getTimerPeriod()*collect_independent_sets_time, collect_independent_sets_runs, CPUTimer::getTimerPeriod()*collect_independent_sets_time/collect_independent_sets_runs);
dreamplacePrint(kDEBUG, "cost_matrix_construction takes %g ms, %d runs, average %g ms\n",
CPUTimer::getTimerPeriod()*cost_matrix_construction_time, cost_matrix_construction_runs, CPUTimer::getTimerPeriod()*cost_matrix_construction_time/cost_matrix_construction_runs);
dreamplacePrint(kDEBUG, "independent_sets_solving takes %g ms, %d runs, average %g ms\n",
CPUTimer::getTimerPeriod()*independent_sets_solving_time, independent_sets_solving_runs, CPUTimer::getTimerPeriod()*independent_sets_solving_time/independent_sets_solving_runs);
dreamplacePrint(kDEBUG, "apply_solution takes %g ms, %d runs, average %g ms\n",
CPUTimer::getTimerPeriod()*apply_solution_time, apply_solution_runs, CPUTimer::getTimerPeriod()*apply_solution_time/apply_solution_runs);
// destroy state
timer_start = CPUTimer::getGlobaltime();
{
////destroyCUDA(state.node_size_id);
destroyCUDA(state.spaces);
destroyCUDA(state.ordered_nodes);
destroyCUDA(state.independent_sets);
destroyCUDA(state.independent_set_sizes);
////destroyCUDA(state.ordered_independent_sets);
////destroyCUDA(state.reordered_independent_sets);
destroyCUDA(state.selected_maximal_independent_set);
destroyCUDA(state.select_scratch);
destroyCUDA(state.device_num_selected);
////destroyCUDA(state.device_num_selected_prefix_sum);
//destroyCUDA(state.device_num_clusters_prefix_sum);
////destroyCUDA(state.node2center);
//destroyCUDA(state.centers);
////destroyCUDA(state.center_xs);
////destroyCUDA(state.center_ys);
//destroyCUDA(state.cluster_sizes);
destroyCUDA(state.net_hpwls);
destroyCUDA(state.cost_matrices);
destroyCUDA(state.cost_matrices_copy);
destroyCUDA(state.solutions);
destroyCUDA(state.orig_costs);
destroyCUDA(state.solution_costs);
destroyCUDA(state.orig_x);
destroyCUDA(state.orig_y);
destroyCUDA(state.orig_spaces);
destroyCUDA(state.selected_markers);
destroyCUDA(state.dependent_markers);
destroyCUDA(state.independent_set_empty_flag);
////destroyCUDA(state.device_num_independent_sets);
destroyCUDA(state.device_num_moved);
destroy_auction(state.auction_scratch, state.stop_flags);
// destroy kmeans state
destroy_kmeans(kmeans_state);
}
timer_stop = CPUTimer::getGlobaltime();
dreamplacePrint(kINFO, "destroying GPU memory takes %g ms\n", CPUTimer::getTimerPeriod()*(timer_stop-timer_start));
total_timer_stop = CPUTimer::getGlobaltime();
dreamplacePrint(kINFO, "Kernel time %g ms\n", CPUTimer::getTimerPeriod()*(kernel_timer_stop-kernel_timer_start));
dreamplacePrint(kINFO, "Independent set matching time %g ms\n", CPUTimer::getTimerPeriod()*(total_timer_stop-total_timer_start));
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int independentSetMatchingCUDALauncher<T>(\
DetailedPlaceDB<T> db, \
int batch_size, \
int set_size, \
int max_iters, \
int num_threads \
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
the_stack
|
#pragma once
#include "Common.h"
#include "CudaConstraints.h"
#include "CudaData.cuh"
#include <Eigen/SVD>
namespace viper {
struct C_distance_solve {
C_distance_solve(C_distance *C, CudaStatePtr S, CudaProjectionsPtr P,
float *L, float dt)
: C(C), S(S), P(P), L(L), dt(dt) {}
C_distance *C;
CudaStatePtr S;
CudaProjectionsPtr P;
float *L;
float dt;
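// XPBD-style compliant distance constraint: C = |x_b - x_a| - rest distance,
// alpha_tilde = compliance / dt^2, and the multiplier update
// dL = (-C - alpha_tilde * L) / (w_a + w_b + alpha_tilde), accumulated into L[i].
// The same compliant-constraint pattern recurs in the volume, radius, and bilap solvers below.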
__host__ __device__ void operator()(int i) const {
int a = C[i].a;
int b = C[i].b;
Vec3 ab = S.x[b] - S.x[a];
float dist = ab.norm();
Vec3 n = -ab / dist;
float alpha = C[i].compliance / (dt * dt);
float c = dist - C[i].rDist;
float dL = (-c - alpha * L[i]) / (S.w[a] + S.w[b] + alpha);
Vec3 corr = dL * n;
L[i] += dL;
P.id[2 * i + 0] = a;
P.id[2 * i + 1] = b;
P.dx[2 * i + 0].head<3>() = S.w[a] * corr;
P.dx[2 * i + 1].head<3>() = -S.w[b] * corr;
P.dx[2 * i + 0][4] = 1.f;
P.dx[2 * i + 1][4] = 1.f;
}
};
struct C_distancemax_solve {
C_distancemax_solve(C_distancemax *C, CudaStatePtr S, CudaProjectionsPtr P)
: C(C), S(S), P(P) {}
C_distancemax *C;
CudaStatePtr S;
CudaProjectionsPtr P;
__host__ __device__ void operator()(int i) const {
int a = C[i].a;
int b = C[i].b;
Vec3 ab = S.x[b] - S.x[a];
float dist = ab.norm();
Vec3 n = ab / dist;
Vec3 corr = (dist - C[i].max_distance) / (S.w[a] + S.w[b]) * n;
float s = dist > C[i].max_distance;
P.id[2 * i + 0] = a;
P.id[2 * i + 1] = b;
P.dx[2 * i + 0].head<3>() = S.w[a] * corr * s;
P.dx[2 * i + 1].head<3>() = -S.w[b] * corr * s;
P.dx[2 * i + 0][4] = s;
P.dx[2 * i + 1][4] = s;
}
};
struct C_skinning_solve {
C_skinning_solve(C_skinning *C, CudaStatePtr S) : C(C), S(S) {}
C_skinning *C;
CudaStatePtr S;
__host__ __device__ Vec4 toH(Vec3 v) const {
return Vec4(v[0], v[1], v[2], 1.f);
}
__host__ __device__ void operator()(int i) const {
Matrix4 T0 = Matrix4::Identity();
Matrix4 T1 = Matrix4::Identity();
Matrix4 T0i = Matrix4::Identity();
Matrix4 T1i = Matrix4::Identity();
if (C[i].t0 != -1) {
T0 = S.b[C[i].t0];
T0i = S.bp[C[i].t0];
}
if (C[i].t1 != -1) {
T1 = S.b[C[i].t1];
T1i = S.bp[C[i].t1];
}
Vec3 p = S.x[C[i].i];
Vec3 proj0 = (T0 * T0i * toH(p)).head<3>();
Vec3 proj1 = (T1 * T1i * toH(p)).head<3>();
Vec3 proj = C[i].w0 * proj0 + C[i].w1 * proj1;
if (S.w[C[i].i] > 0) {
S.x[C[i].i] = proj;
S.xp[C[i].i] += proj - p;
}
}
};
struct C_collpp_solve {
C_collpp_solve(C_collpp *C, CudaStatePtr S, CudaProjectionsPtr P)
: C(C), S(S), P(P) {}
C_collpp *C;
CudaStatePtr S;
CudaProjectionsPtr P;
__host__ __device__ void operator()(int i) const {
int a = C[i].a;
int b = C[i].b;
Vec3 ab = S.x[b] - S.x[a];
float d = ab.norm();
float R = S.r[a] + S.r[b];
Vec3 n = ab / d;
float s = d < R;
Vec3 corr = (d - R) / (S.w[a] + S.w[b]) * n * s;
P.id[2 * i + 0] = a;
P.id[2 * i + 1] = b;
P.dx[2 * i + 0].head<3>() = S.w[a] * corr;
P.dx[2 * i + 1].head<3>() = -S.w[b] * corr;
P.dx[2 * i + 0][4] = s;
P.dx[2 * i + 1][4] = s;
}
};
struct C_volume_solve {
C_volume_solve(C_volume *C, CudaStatePtr S, CudaProjectionsPtr P, float *L,
float dt)
: C(C), S(S), P(P), Lambda(L), dt(dt) {}
C_volume *C;
CudaStatePtr S;
CudaProjectionsPtr P;
float *Lambda;
float dt;
__host__ __device__ void operator()(int i) const {
int a = C[i].a;
int b = C[i].b;
float ra = S.r[a];
float rb = S.r[b];
float Vr = C[i].Vr;
Vec3 ba = S.x[a] - S.x[b];
float d = ba.norm();
float e = (rb - ra) / d;
float L = d + (ra - rb) * e;
float e2 = e * e;
float e3 = e2 * e;
float e4 = e3 * e;
float ra2 = ra * ra;
float rb2 = rb * rb;
float ra3 = ra2 * ra;
float rb3 = rb2 * rb;
float rarb = (ra2 + ra * rb + rb2);
float gpa_n = (M_PIf * (e - e3) * (ra3 - rb3) / d +
M_PIf / 3.f * rarb * (1.f - 3.f * e4 + 2.f * e2)) /
Vr;
float gra =
(M_PIf * ra2 * (ra / d * (1.f - e2) + e3 - 3.f * e) +
M_PIf * rb3 / d * (e2 - 1.f) +
M_PIf / 3.f *
(+4.f * rarb * (e - e3) + L * (1.f - e2) * (2.f * ra + rb))) /
Vr;
float grb =
(M_PIf * rb2 * (rb / d * (1.f - e2) + 3.f * e - e3) +
M_PIf * ra3 / d * (e2 - 1.f) +
M_PIf / 3.f *
(-4.f * rarb * (e - e3) + L * (1.f - e2) * (ra + 2.f * rb))) /
Vr;
Vec3 ban = ba / d;
float wSum = (S.w[a] + S.w[b]) * gpa_n * gpa_n + S.wr[a] * gra * gra +
S.wr[b] * grb * grb;
// float wSum = S.wr[a] * gra * gra + S.wr[b] * grb * grb;
float V = M_PIf / 3.0f *
((ra3 - rb3) * (e3 - 3.f * e) + L * (1.f - e2) * rarb);
float c = V / Vr - 1.f;
float alpha = C[i].compliance / (dt * dt);
float dL = (-c - alpha * Lambda[i]) / (wSum + alpha);
Vec3 dpa = gpa_n * ban * dL;
float dra = gra * dL;
float drb = grb * dL;
P.id[2 * i + 0] = a;
P.id[2 * i + 1] = b;
P.dx[2 * i + 0].head<3>() = S.w[a] * dpa;
P.dx[2 * i + 1].head<3>() = -S.w[b] * dpa;
P.dx[2 * i + 0][3] = S.wr[a] * dra;
P.dx[2 * i + 1][3] = S.wr[b] * drb;
P.dx[2 * i + 0].tail<2>() = Vec2::Ones();
P.dx[2 * i + 1].tail<2>() = Vec2::Ones();
Lambda[i] += dL;
// printf("V/Vr: %f\n", V / Vr);
}
};
struct C_volume2_solve {
C_volume2_solve(C_volume2 *C, CudaStatePtr S, CudaProjectionsPtr P,
float *L, float dt)
: C(C), S(S), P(P), Lambda(L), dt(dt) {}
C_volume2 *C;
CudaStatePtr S;
CudaProjectionsPtr P;
float *Lambda;
float dt;
__host__ __device__ void operator()(int i) const {
int a = C[i].a;
int b = C[i].b;
int c = C[i].c;
float s = 0.5f * (S.r[a] + S.r[b]);
float s0 = C[i].r0;
float l0 = C[i].l0;
Vec3 ab = S.x[b] - S.x[a];
Quaternion qc = S.q[c];
Quaternion q = S.q[c];
Vec3 R3 = ab.normalized();
float r3ab = R3.dot(ab);
float X = s * s * r3ab - s0 * s0 * l0;
float W = X;
Vec3 gp = -s * s * R3;
float gr = s * r3ab;
Vec4 gq = Vec4::Zero();
float wSum = (S.wr[a] + S.wr[b]) * gr * gr +
(S.w[a] + S.w[b]) * gp.squaredNorm() +
S.wq[c] * gq.squaredNorm() + 1e-6f;
float lambda = W / wSum;
Vec3 dp = -lambda * gp;
float dr = -lambda * gr;
Vec4 dq = -lambda * gq;
float w = 1.0f;
P.id[3 * i + 0] = a;
P.id[3 * i + 1] = b;
P.id[3 * i + 2] = c;
P.dx[3 * i + 0].head<3>() = S.w[a] * dp * w;
P.dx[3 * i + 1].head<3>() = -S.w[b] * dp * w;
P.dx[3 * i + 0][3] = S.wr[a] * dr * w;
P.dx[3 * i + 1][3] = S.wr[b] * dr * w;
P.dx[3 * i + 2].head<4>() = S.wq[c] * dq * w;
P.dx[3 * i + 0].tail<2>() = Vec2::Ones() * w;
P.dx[3 * i + 1].tail<2>() = Vec2::Ones() * w;
P.dx[3 * i + 2][4] = 1.f * w;
}
};
struct C_bend_solve {
C_bend_solve(C_bend *C, CudaStatePtr S, CudaProjectionsPtr P, int N,
float *L, float dt)
: C(C), S(S), P(P), N(N), L(L), dt(dt) {}
C_bend *C;
CudaStatePtr S;
CudaProjectionsPtr P;
int N;
float *L;
float dt;
__device__ void operator()(int i) const {
Quaternion qa = S.q[C[i].a];
Quaternion qb = S.q[C[i].b];
float wa = S.wq[C[i].a];
float wb = S.wq[C[i].b];
Quaternion omega = qa.conjugate() * qb; // darboux vector
Quaternion omega_plus;
omega_plus.coeffs() =
omega.coeffs() +
C[i].darbouxRest.coeffs(); // delta Omega with -Omega_0
omega.coeffs() =
omega.coeffs() -
C[i].darbouxRest.coeffs(); // delta Omega with + omega_0
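// q and -q encode the same rotation, so keep whichever sign of the rest Darboux term
// yields the smaller deviation from the current Darboux vector.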
if (omega.squaredNorm() > omega_plus.squaredNorm())
omega = omega_plus;
Vec3 lambda = Vec3(L[3 * i + 0], L[3 * i + 1], L[3 * i + 2]);
float alpha = C[i].compliance / (dt * dt);
Vec3 Cx = Vec3(omega.x(), omega.y(), omega.z());
Vec3 dL = (-Cx - alpha * lambda) / (wa + wb + alpha);
Quaternion dLq = Quaternion(0.f, dL[0], dL[1], dL[2]);
Quaternion da = qb * dLq;
Quaternion db = qa * dLq;
da.coeffs() *= -wa;
db.coeffs() *= wb;
float w = 1.f;
P.id[2 * i + 0] = C[i].a + N;
P.id[2 * i + 1] = C[i].b + N;
P.dx[2 * i + 0].head<4>() = da.coeffs() * w;
P.dx[2 * i + 1].head<4>() = db.coeffs() * w;
P.dx[2 * i + 0][4] = w;
P.dx[2 * i + 1][4] = w;
L[3 * i + 0] += dL[0];
L[3 * i + 1] += dL[1];
L[3 * i + 2] += dL[2];
}
};
struct C_stretch_solve {
C_stretch_solve(C_stretch *C, CudaStatePtr S, CudaProjectionsPtr P, int N,
float *L, float dt)
: C(C), S(S), P(P), N(N), L(L), dt(dt) {}
C_stretch *C;
CudaStatePtr S;
CudaProjectionsPtr P;
int N;
float *L;
float dt;
__device__ void operator()(int i) const {
{
int a = C[i].a;
int b = C[i].b;
int c = C[i].c;
const Quaternion &qc = S.q[c];
float wa = S.w[a];
float wb = S.w[b];
float wq = S.wq[c];
Vec3 d3;
d3[0] = 2.0 * (qc.x() * qc.z() + qc.w() * qc.y());
d3[1] = 2.0 * (qc.y() * qc.z() - qc.w() * qc.x());
d3[2] = qc.w() * qc.w() - qc.x() * qc.x() - qc.y() * qc.y() +
qc.z() * qc.z();
Vec3 Cx = (S.x[b] - S.x[a]) / C[i].L - d3;
Vec3 lambda = Vec3(L[i * 3 + 0], L[i * 3 + 1], L[i * 3 + 2]);
float alpha = C[i].compliance / (dt * dt);
float l2 = C[i].L * C[i].L;
Vec3 dL = (-Cx - alpha * lambda) * l2 /
(wa + wb + 4 * l2 * wq + alpha * l2);
Vec3 dp = dL / C[i].L;
Quaternion q_e_3_bar(qc.z(), -qc.y(), qc.x(),
-qc.w()); // compute q*e_3.conjugate (cheaper
// than quaternion product)
Vec3 dq_v = -2.f * wq * dL;
Quaternion dq =
Quaternion(0.0, dq_v.x(), dq_v.y(), dq_v.z()) * q_e_3_bar;
if (C[i].L < 1e-6f)
printf("i: %d L: %f abc: %d %d %d \n", i, C[i].L, a, b, c);
P.id[3 * i + 0] = a;
P.id[3 * i + 1] = b;
P.dx[3 * i + 0].head<3>() = -wa * dp;
P.dx[3 * i + 1].head<3>() = wb * dp;
P.dx[3 * i + 0][4] = 1.f;
P.dx[3 * i + 1][4] = 1.f;
P.id[3 * i + 2] = c + N;
P.dx[3 * i + 2].head<4>() = dq.coeffs();
P.dx[3 * i + 2][4] = 1.f;
L[3 * i + 0] += dL[0];
L[3 * i + 1] += dL[1];
L[3 * i + 2] += dL[2];
}
}
};
struct C_radius_solve {
C_radius_solve(C_radius *C, CudaStatePtr S, CudaProjectionsPtr P, float *L,
float dt)
: C(C), S(S), P(P), L(L), dt(dt) {}
C_radius *C;
CudaStatePtr S;
CudaProjectionsPtr P;
float *L;
float dt;
__device__ void operator()(int i) const {
int id = C[i].a;
float w = S.wr[id];
float r0 = C[i].r;
float alpha = C[i].compliance / (dt * dt);
float c = S.r[id] / r0 - 1.0f;
float dL = (-c - alpha * L[i]) / (w / (r0 * r0) + alpha);
float dr = w / r0 * dL;
if (w < 1e-6f)
return;
P.id[i] = id;
P.dx[i][3] = dr;
P.dx[i][4] = 1.f;
L[i] += dL;
}
};
struct C_touch_solve {
C_touch_solve(C_touch *C, CudaStatePtr S, CudaProjectionsPtr P, float *L,
float dt)
: C(C), S(S), P(P), L(L), dt(dt) {}
C_touch *C;
CudaStatePtr S;
CudaProjectionsPtr P;
float *L;
float dt;
__device__ void operator()(int i) const {
int a = C[i].a;
int b = C[i].b;
Vec3 ba = S.x[a] - S.x[b];
float dist = ba.norm();
float c = dist - S.r[a] - S.r[b];
float wSum = S.w[a] + S.w[b] + S.wr[a] + S.wr[b];
Vec3 dx = -c / wSum * ba.normalized();
float dr = c / wSum;
float w = 1.f;
P.id[2 * i + 0] = C[i].a;
P.id[2 * i + 1] = C[i].b;
P.dx[2 * i + 0].head<3>() = S.w[a] * dx * w;
P.dx[2 * i + 1].head<3>() = -S.w[b] * dx * w;
P.dx[2 * i + 0][3] = S.wr[a] * dr * w;
P.dx[2 * i + 1][3] = S.wr[b] * dr * w;
P.dx[2 * i + 0][4] = w;
P.dx[2 * i + 1][4] = w;
P.dx[2 * i + 0][5] = w;
P.dx[2 * i + 1][5] = w;
}
};
struct C_bilap_solve {
C_bilap_solve(C_bilap *C, CudaStatePtr S, CudaProjectionsPtr P, float *L,
float dt)
: C(C), S(S), P(P), L(L), dt(dt) {}
C_bilap *C;
CudaStatePtr S;
CudaProjectionsPtr P;
float *L;
float dt;
__device__ void operator()(int i) const {
int id = C[i].ids[2];
float r = 0.f;
for (int k = 0; k < 5; k++) {
if (k == 2)
continue;
r -= C[i].w[k] * S.r[C[i].ids[k]];
}
r /= C[i].w[2];
float alpha = C[i].compliance / (dt * dt);
float c = S.r[id] - r;
float dL = (-c - alpha * L[i]) / (S.w[id] + alpha);
float dr = S.w[id] * dL;
if (S.wr[id] < 1e-6f)
return;
float w = 1.f;
P.id[i] = id;
P.dx[i][3] = dr * w;
P.dx[i][5] = w;
L[i] += dL;
}
};
struct C_shape2_solve {
C_shape2_solve(C_shape2 *C, CudaStatePtr S, CudaProjectionsPtr P, int N,
float *L, float dt)
: C(C), S(S), P(P), N(N), L(L), dt(dt) {}
C_shape2 *C;
CudaStatePtr S;
CudaProjectionsPtr P;
int *o;
int N;
float *L;
float dt;
__device__ void operator()(int i) const {
int n = C[i].n;
Matrix4 T_source[SHAPE_MATCHING_MAX];
Matrix4 T_target[SHAPE_MATCHING_MAX];
for (int k = 0; k < n; k++) {
T_target[k] = Matrix4::Identity();
T_target[k].block<3, 3>(0, 0) =
S.r[C[i].id[k]] *
S.q[C[i].qa[k]].slerp(0.5, S.q[C[i].qb[k]]).toRotationMatrix();
T_target[k].col(3).head<3>() = S.x[C[i].id[k]];
T_source[k] = Matrix4::Identity();
T_source[k].block<3, 3>(0, 0) =
S.ri[C[i].id[k]] * S.qi[C[i].qa[k]]
.slerp(0.5, S.qi[C[i].qb[k]])
.toRotationMatrix();
T_source[k].col(3).head<3>() = S.xi[C[i].id[k]];
}
Vec3 mean_source = Vec3::Zero();
Vec3 mean_target = Vec3::Zero();
for (int k = 0; k < n; k++) {
mean_source += T_source[k].col(3).head<3>();
mean_target += T_target[k].col(3).head<3>();
}
mean_source /= n;
mean_target /= n;
for (int k = 0; k < n; k++) {
T_source[k].col(3).head<3>() -= mean_source;
T_target[k].col(3).head<3>() -= mean_target;
}
Matrix3 A = Matrix3::Zero();
for (int k = 0; k < n; k++) {
Eigen::Matrix<float, 3, 4> a = T_target[k].block<3, 4>(0, 0);
Eigen::Matrix<float, 3, 4> b = T_source[k].block<3, 4>(0, 0);
A += a * b.transpose();
}
A = A / (4.0 * n);
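// Iteratively extract the rotational part of A, warm-starting from the previous q;
// this loop appears to follow the robust rotation extraction of Mueller et al.
// ("A Robust Method to Extract the Rotational Part of Deformations").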
for (int iter = 0; iter < 100; iter++) {
Matrix3 R = C[i].q.matrix();
Vec3 omega =
(R.col(0).cross(A.col(0)) + R.col(1).cross(A.col(1)) +
R.col(2).cross(A.col(2))) *
(1.0 / fabs(R.col(0).dot(A.col(0)) + R.col(1).dot(A.col(1)) +
R.col(2).dot(A.col(2))) +
1.0e-9);
float w = (float)omega.norm();
if (w < 1.0e-6f)
break;
C[i].q = Quaternion(Rotation(w, (float)(1.0 / w) * omega)) * C[i].q;
C[i].q.normalize();
}
Matrix3 R = C[i].q.matrix();
float nom = 0.0f;
float den = 0.0f;
for (int k = 0; k < n; k++) {
Eigen::Matrix<float, 3, 4> a = T_source[k].block<3, 4>(0, 0);
Eigen::Matrix<float, 3, 4> b = T_target[k].block<3, 4>(0, 0);
nom += (R * a).cwiseProduct(b).sum();
den += a.cwiseProduct(a).sum();
}
float s = nom / den;
Vec3 Rm = R * mean_source;
Vec3 t = mean_target - s * Rm;
Quaternion qR = Quaternion(R);
int M = 3 * SHAPE_MATCHING_MAX;
t = mean_target - mean_source;
for (int k = 0; k < n; k++) {
int id = C[i].id[k];
int a = C[i].qa[k];
int b = C[i].qb[k];
Vec3 dx = s * R * (S.xi[id] - mean_source) + mean_target - S.x[id];
Vec4 dqa = (C[i].q * S.qi[a]).coeffs() - S.q[a].coeffs();
Vec4 dqb = (C[i].q * S.qi[b]).coeffs() - S.q[b].coeffs();
float dr = s * S.ri[id] - S.r[id];
if (S.w[id] > 1e-6f) {
P.id[i * M + 0 * SHAPE_MATCHING_MAX + k] = id;
P.dx[i * M + 0 * SHAPE_MATCHING_MAX + k].head<3>() = dx;
P.dx[i * M + 0 * SHAPE_MATCHING_MAX + k][3] = dr;
P.dx[i * M + 0 * SHAPE_MATCHING_MAX + k][4] = 1.0f;
P.dx[i * M + 0 * SHAPE_MATCHING_MAX + k][5] = 1.0f;
}
P.id[i * M + 1 * SHAPE_MATCHING_MAX + k] = a + N;
P.dx[i * M + 1 * SHAPE_MATCHING_MAX + k].head<4>() = dqa;
P.dx[i * M + 1 * SHAPE_MATCHING_MAX + k][4] = 1.0f;
P.id[i * M + 2 * SHAPE_MATCHING_MAX + k] = b + N;
P.dx[i * M + 2 * SHAPE_MATCHING_MAX + k].head<4>() = dqb;
P.dx[i * M + 2 * SHAPE_MATCHING_MAX + k][4] = 1.0f;
}
}
};
struct C_shape_solve {
C_shape_solve(C_shape *C, CudaStatePtr S, CudaProjectionsPtr P, float *L,
float dt)
: C(C), S(S), P(P), L(L), dt(dt) {}
C_shape *C;
CudaStatePtr S;
CudaProjectionsPtr P;
int *o;
float *L;
float dt;
__device__ void operator()(int i) const {
int n = C[i].n;
Vec3 center = Vec3::Zero();
for (int k = 0; k < SHAPE_MATCHING_MAX; k++) {
float s = k < n;
center += S.x[C[i].id[k]] * s;
}
center /= (float)n;
Vec3 x[SHAPE_MATCHING_MAX];
for (int k = 0; k < SHAPE_MATCHING_MAX; k++)
x[k] = S.x[C[i].id[k]] - center;
Matrix3 A = Matrix3::Zero();
for (int k = 0; k < SHAPE_MATCHING_MAX; k++)
A += x[k] * C[i].xp[k].transpose();
for (int iter = 0; iter < 10; iter++) {
Matrix3 R = C[i].q.matrix();
Vec3 omega =
(R.col(0).cross(A.col(0)) + R.col(1).cross(A.col(1)) +
R.col(2).cross(A.col(2))) *
(1.0 / fabs(R.col(0).dot(A.col(0)) + R.col(1).dot(A.col(1)) +
R.col(2).dot(A.col(2))) +
1.0e-9);
float w = (float)omega.norm();
if (w < 1.0e-6f)
break;
C[i].q = Quaternion(Rotation(w, (float)(1.0 / w) * omega)) * C[i].q;
C[i].q.normalize();
}
float scale = 0.0f;
for (int k = 0; k < SHAPE_MATCHING_MAX; k++) {
float s = k < n;
scale += S.r[C[i].id[k]] * s;
}
scale /= (float)n * C[i].r;
float wSum = 0.f;
Vec3 grad[SHAPE_MATCHING_MAX];
for (int k = 0; k < SHAPE_MATCHING_MAX; k++) {
grad[k] = x[k] - C[i].q * C[i].xp[k] * scale;
wSum += grad[k].squaredNorm();
}
float alpha = 0.f;
float dL = (-wSum - alpha * L[i]) / (wSum + alpha);
float weight = 1.f;
for (int k = 0; k < SHAPE_MATCHING_MAX; k++) {
if (k < n && S.w[C[i].id[k]] > 0.f) {
Vec3 corr = dL * grad[k];
P.id[i * SHAPE_MATCHING_MAX + k] = C[i].id[k];
P.dx[i * SHAPE_MATCHING_MAX + k].head<3>() = corr * weight;
P.dx[i * SHAPE_MATCHING_MAX + k][4] = weight;
}
}
L[i] += dL;
}
};
__device__ float closestPtPointPill(const Vec3 &p, const Vec3 &a, const Vec3 &b,
float ra, float rb, float L, float sigma,
float &t, Vec &c, float &r) {
Vec3 ab = b - a;
Vec3 abn = ab / L;
Vec3 ap = p - a;
Vec3 p_proj = ap - abn * ap.dot(abn);
Vec3 offset = abn * p_proj.norm() * sigma;
t = min(1.f, max(0.f, abn.dot(ap + offset) / L));
c = (1.f - t) * a + t * b;
r = (1.f - t) * ra + t * rb;
if (std::isnan(sigma))
return min(ap.norm() - ra, (p - b).norm() - rb);
return (c - p).norm() - r;
}
__device__ float pillDistanceU(const Vec3 *x, const float *r, const Vec2i &a,
const Vec2i &b, float LB, float sigmaB, float u,
float &v, Vec3 &pa, Vec3 &pb) {
pa = (1.f - u) * x[a(0)] + u * x[a(1)];
float ra = (1.f - u) * r[a(0)] + u * r[a(1)];
float rc;
return closestPtPointPill(pa, x[b(0)], x[b(1)], r[b(0)], r[b(1)], LB,
sigmaB, v, pb, rc) -
ra;
}
__device__ float closestPtPills(const Vec3 *x, const float *r, const Vec2i &a,
const Vec2i &b, Vec2 &uv, Vec &pa, Vec &pb) {
Vec2 range = Vec2(0.f, 1.f);
float eps = 1e-6f;
float LB = (x[b(0)] - x[b(1)]).norm();
float sigmaB = tan(asin((r[b[1]] - r[b[0]]) / LB));
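// Derivative-free search for the closest-point parameter u on pill a: evaluate the distance
// at mid +/- eps and shrink the bracket toward the smaller side; 10 halvings give roughly
// 1e-3 resolution in u.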
for (int j = 0; j < 10; j++) {
float mid = 0.5f * range.sum();
float ua = mid - eps;
float ub = mid + eps;
float v;
Vec3 pa, pb;
float fa = pillDistanceU(x, r, a, b, LB, sigmaB, ua, v, pa, pb);
float fb = pillDistanceU(x, r, a, b, LB, sigmaB, ub, v, pa, pb);
if (fa == fb)
range = Vec2(ua, ub);
else if (fa > fb)
range(0) = ua;
else
range(1) = ub;
}
uv(0) = 0.5f * range.sum();
return pillDistanceU(x, r, a, b, LB, sigmaB, uv(0), uv(1), pa, pb);
}
struct C_collision_solve {
C_collision_solve(C_collision *C, CudaStatePtr S, CudaProjectionsPtr P)
: C(C), S(S), P(P) {}
C_collision *C;
CudaStatePtr S;
CudaProjectionsPtr P;
__device__ void operator()(int i) const {
int a0 = C[i].a[0];
int a1 = C[i].a[1];
int b0 = C[i].b[0];
int b1 = C[i].b[1];
Vec2 uv;
Vec3 pa, pb;
float dist = closestPtPills(S.x, S.r, C[i].a, C[i].b, uv, pa, pb);
float alpha1 = 1.f - uv(0);
float alpha2 = uv(0);
float beta1 = 1.f - uv(1);
float beta2 = uv(1);
Vec3 ba = (pa - pb).normalized();
float wSum = S.w[a0] * alpha1 * alpha1 + S.w[a1] * alpha2 * alpha2 +
S.w[b0] * beta1 * beta1 + S.w[b1] * beta2 * beta2;
Vec3 corr = dist / (wSum + 1e-7f) * ba;
float s = dist < 0.0f && wSum > 1e-5f;
P.id[4 * i + 0] = a0;
P.id[4 * i + 1] = a1;
P.id[4 * i + 2] = b0;
P.id[4 * i + 3] = b1;
P.dx[4 * i + 0].head<3>() = -alpha1 * S.w[a0] * corr * s;
P.dx[4 * i + 1].head<3>() = -alpha2 * S.w[a1] * corr * s;
P.dx[4 * i + 2].head<3>() = beta1 * S.w[b0] * corr * s;
P.dx[4 * i + 3].head<3>() = beta2 * S.w[b1] * corr * s;
P.dx[4 * i + 0][4] = s;
P.dx[4 * i + 1][4] = s;
P.dx[4 * i + 2][4] = s;
P.dx[4 * i + 3][4] = s;
}
};
} // namespace viper
|
the_stack
|
// GPU copy benchmark: measures dtoh/htod/dtod data transfer bandwidth using GPU SM copy and DMA copy.
#include <chrono>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>
#include <getopt.h>
#include <numa.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Arguments for each benchmark run.
struct BenchArgs {
// Whether source device is GPU.
bool is_src_dev_gpu = false;
// Whether destination device is GPU.
bool is_dst_dev_gpu = false;
// GPU ID for source device (if applicable).
int src_gpu_id = 0;
// GPU ID for destination device (if applicable).
int dst_gpu_id = 0;
// GPU ID for worker device.
int worker_gpu_id = 0;
// Uses SM copy, otherwise DMA copy.
bool is_sm_copy = false;
// NUMA node under which the benchmark is done.
uint64_t numa_id = 0;
// Data buffer size used.
uint64_t size = 0;
// Number of loops to run.
uint64_t num_loops = 0;
};
struct Buffers {
// Original data buffer.
uint8_t *data_buf = nullptr;
// Buffer to validate the correctness of data transfer.
uint8_t *check_buf = nullptr;
// Host pointer of the data buffer on source device.
uint8_t *src_dev_host_buf_ptr = nullptr;
// GPU pointer of the data buffer on source devices.
uint8_t *src_dev_gpu_buf_ptr = nullptr;
// Host pointer of the data buffer on destination device.
uint8_t *dst_dev_host_buf_ptr = nullptr;
// GPU pointer of the data buffer on destination devices.
uint8_t *dst_dev_gpu_buf_ptr = nullptr;
};
// Options accepted by this program.
struct Opts {
// Data buffer size for copy benchmark.
uint64_t size;
// Number of loops to run in the copy benchmark.
uint64_t num_loops;
// Whether GPU SM copy needs to be evaluated.
bool sm_copy_enabled = false;
// Whether GPU DMA copy needs to be evaluated.
bool dma_copy_enabled = false;
// Whether host-to-device transfer needs to be evaluated.
bool htod_enabled = false;
// Whether device-to-host transfer needs to be evaluated.
bool dtoh_enabled = false;
// Whether device-to-device transfer needs to be evaluated.
bool dtod_enabled = false;
};
// Print usage of this program.
void PrintUsage() {
printf("Usage: gpu_copy "
"--size <size> "
"--num_loops <num_loops> "
"[--sm_copy] "
"[--dma_copy] "
"[--htod] "
"[--dtoh] "
"[--dtod]\n");
}
// Parse options of this program.
int ParseOpts(int argc, char **argv, Opts *opts) {
enum class OptIdx { kSize, kNumIters, kEnableSmCopy, kEnableDmaCopy, kEnableHToD, kEnableDToH, kEnableDToD };
const struct option options[] = {{"size", required_argument, nullptr, static_cast<int>(OptIdx::kSize)},
{"num_loops", required_argument, nullptr, static_cast<int>(OptIdx::kNumIters)},
{"sm_copy", no_argument, nullptr, static_cast<int>(OptIdx::kEnableSmCopy)},
{"dma_copy", no_argument, nullptr, static_cast<int>(OptIdx::kEnableDmaCopy)},
{"htod", no_argument, nullptr, static_cast<int>(OptIdx::kEnableHToD)},
{"dtoh", no_argument, nullptr, static_cast<int>(OptIdx::kEnableDToH)},
{"dtod", no_argument, nullptr, static_cast<int>(OptIdx::kEnableDToD)}};
int getopt_ret = 0;
int opt_idx = 0;
bool size_specified = false;
bool num_loops_specified = false;
bool parse_err = false;
while (true) {
getopt_ret = getopt_long(argc, argv, "", options, &opt_idx);
if (getopt_ret == -1) {
if (!size_specified || !num_loops_specified) {
parse_err = true;
}
break;
} else if (getopt_ret == '?') {
parse_err = true;
break;
}
switch (opt_idx) {
case static_cast<int>(OptIdx::kSize):
if (1 != sscanf(optarg, "%lu", &(opts->size))) {
fprintf(stderr, "Invalid size: %s\n", optarg);
parse_err = true;
} else {
size_specified = true;
}
break;
case static_cast<int>(OptIdx::kNumIters):
if (1 != sscanf(optarg, "%lu", &(opts->num_loops))) {
fprintf(stderr, "Invalid num_loops: %s\n", optarg);
parse_err = true;
} else {
num_loops_specified = true;
}
break;
case static_cast<int>(OptIdx::kEnableSmCopy):
opts->sm_copy_enabled = true;
break;
case static_cast<int>(OptIdx::kEnableDmaCopy):
opts->dma_copy_enabled = true;
break;
case static_cast<int>(OptIdx::kEnableHToD):
opts->htod_enabled = true;
break;
case static_cast<int>(OptIdx::kEnableDToH):
opts->dtoh_enabled = true;
break;
case static_cast<int>(OptIdx::kEnableDToD):
opts->dtod_enabled = true;
break;
default:
parse_err = true;
}
if (parse_err) {
break;
}
}
if (parse_err) {
PrintUsage();
return -1;
}
return 0;
}
// Get number of GPUs available.
int GetGpuCount(int *gpu_count) {
cudaError_t cuda_err = cudaGetDeviceCount(gpu_count);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "GetGpuCount::cudaGetDeviceCount error: %d\n", cuda_err);
return -1;
}
return 0;
}
// Set GPU context according to device ID.
int SetGpu(int gpu_id) {
cudaError_t cuda_err = cudaSetDevice(gpu_id);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "SetGpu::cudaSetDevice %d error: %d\n", gpu_id, cuda_err);
return -1;
}
return 0;
}
// Prepare data buffers to be used.
int PrepareBuf(const BenchArgs &args, Buffers *buffers) {
cudaError_t cuda_err = cudaSuccess;
constexpr int uint8_mod = 256;
// Generate data to copy
buffers->data_buf = static_cast<uint8_t *>(numa_alloc_onnode(args.size, args.numa_id));
for (int i = 0; i < args.size; i++) {
buffers->data_buf[i] = static_cast<uint8_t>(i % uint8_mod);
}
// Reset check buffer
buffers->check_buf = static_cast<uint8_t *>(numa_alloc_onnode(args.size, args.numa_id));
memset(buffers->check_buf, 0, args.size);
// Allocate buffers for src/dst devices
constexpr int num_devices = 2;
bool is_dev_gpu[num_devices] = {args.is_src_dev_gpu, args.is_dst_dev_gpu};
int dev_ids[num_devices] = {args.src_gpu_id, args.dst_gpu_id};
uint8_t **host_buf_ptrs[num_devices] = {&(buffers->src_dev_host_buf_ptr), &(buffers->dst_dev_host_buf_ptr)};
uint8_t **gpu_buf_ptrs[num_devices] = {&(buffers->src_dev_gpu_buf_ptr), &(buffers->dst_dev_gpu_buf_ptr)};
for (int i = 0; i < num_devices; i++) {
// Allocate buffers
if (is_dev_gpu[i]) {
// Set to buffer device for GPU buffer
if (SetGpu(dev_ids[i])) {
return -1;
}
*(host_buf_ptrs[i]) = nullptr;
cuda_err = cudaMalloc(gpu_buf_ptrs[i], args.size);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "PrepareBuf::cudaMalloc error: %d\n", cuda_err);
return -1;
}
} else {
// Set to worker device for host memory buffer
if (SetGpu(args.worker_gpu_id)) {
return -1;
}
*(host_buf_ptrs[i]) = static_cast<uint8_t *>(numa_alloc_onnode(args.size, args.numa_id));
cuda_err = cudaHostRegister(*(host_buf_ptrs[i]), args.size, cudaHostRegisterMapped);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "PrepareBuf::cudaHostRegister error: %d\n", cuda_err);
return -1;
}
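// cudaHostGetDevicePointer returns a device-visible alias of the registered (pinned)
// host buffer, so kernels and async copies can access host memory directly (zero-copy).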
cuda_err = cudaHostGetDevicePointer((void **)gpu_buf_ptrs[i], *(host_buf_ptrs[i]), 0);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "PrepareBuf::cudaHostGetDevicePointer error: %d\n", cuda_err);
return -1;
}
}
}
// Initialize source buffer
if (SetGpu(args.src_gpu_id)) {
return -1;
}
cuda_err = cudaMemcpy(buffers->src_dev_gpu_buf_ptr, buffers->data_buf, args.size, cudaMemcpyDefault);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "PrepareBuf::cudaMemcpy error: %d\n", cuda_err);
return -1;
}
return 0;
}
// Validate the result of data transfer.
int CheckBuf(const BenchArgs &args, const Buffers &buffers) {
cudaError_t cuda_err = cudaSuccess;
// Copy result
if (SetGpu(args.dst_gpu_id)) {
return -1;
}
cuda_err = cudaMemcpy(buffers.check_buf, buffers.src_dev_gpu_buf_ptr, args.size, cudaMemcpyDefault);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "CheckBuf::cudaMemcpy error: %d\n", cuda_err);
return -1;
}
// Validate result
int memcmp_result = memcmp(buffers.data_buf, buffers.check_buf, args.size);
if (memcmp_result) {
fprintf(stderr, "CheckBuf: Memory check failed\n");
return -1;
}
return 0;
}
// Destroy data buffers
int DestroyBuf(const BenchArgs &args, Buffers *buffers) {
int ret = 0;
cudaError_t cuda_err = cudaSuccess;
// Destroy original data buffer and check buffer
if (buffers->data_buf != nullptr)
numa_free(buffers->data_buf, args.size);
if (buffers->check_buf != nullptr)
numa_free(buffers->check_buf, args.size);
// Only destroy buffers for src/dst devices
constexpr int num_devices = 2;
bool is_dev_gpu[num_devices] = {args.is_src_dev_gpu, args.is_dst_dev_gpu};
int dev_ids[num_devices] = {args.src_gpu_id, args.dst_gpu_id};
uint8_t **host_buf_ptrs[num_devices] = {&(buffers->src_dev_host_buf_ptr), &(buffers->dst_dev_host_buf_ptr)};
uint8_t **gpu_buf_ptrs[num_devices] = {&(buffers->src_dev_gpu_buf_ptr), &(buffers->dst_dev_gpu_buf_ptr)};
for (int i = 0; i < num_devices; i++) {
// Destroy buffers
if (is_dev_gpu[i]) {
if (*(gpu_buf_ptrs[i]) == nullptr) {
continue;
}
// Set to buffer device for GPU buffer
if (SetGpu(dev_ids[i])) {
return -1;
}
cuda_err = cudaFree(*(gpu_buf_ptrs[i]));
if (cuda_err != cudaSuccess) {
fprintf(stderr, "DestroyBuf::cudaFree error: %d\n", cuda_err);
ret = -1;
}
*(gpu_buf_ptrs[i]) = nullptr;
} else {
if (*(host_buf_ptrs[i]) == nullptr) {
continue;
}
// Set to worker device for host memory buffer
if (SetGpu(args.worker_gpu_id)) {
return -1;
}
cuda_err = cudaHostUnregister(*(host_buf_ptrs[i]));
if (cuda_err != cudaSuccess) {
fprintf(stderr, "DestroyBuf::cudaHostUnregister error: %d\n", cuda_err);
ret = -1;
}
numa_free(*(host_buf_ptrs[i]), args.size);
*(host_buf_ptrs[i]) = nullptr;
*(gpu_buf_ptrs[i]) = nullptr;
}
}
return ret;
}
// Unroll depth in SM copy kernel
#define NUM_LOOP_UNROLL 2
// Thread block size
#define NUM_THREADS_IN_BLOCK 128
// Fetch a ulong2 from source memory and write to register
// This kernel references the implementation in
// 1) NCCL:
// https://github.com/NVIDIA/nccl/blob/7e515921295adaab72adf56ea71a0fafb0ecb5f3/src/collectives/device/common_kernel.h#L483
// 2) RCCL:
// https://github.com/ROCmSoftwarePlatform/rccl/blob/5c8380ff5b5925cae4bce00b1879a5f930226e8d/src/collectives/device/common_kernel.h#L268
inline __device__ void FetchULong2(ulong2 &v, const ulong2 *p) {
#if defined(__HIP_PLATFORM_HCC__) || defined(__HCC__) || defined(__HIPCC__)
v.x = p->x;
v.y = p->y;
#else
asm volatile("ld.volatile.global.v2.u64 {%0,%1}, [%2];" : "=l"(v.x), "=l"(v.y) : "l"(p) : "memory");
#endif
}
// Store a ulong2 from register and write to target memory
// This kernel references the implementation in
// 1) NCCL:
// https://github.com/NVIDIA/nccl/blob/7e515921295adaab72adf56ea71a0fafb0ecb5f3/src/collectives/device/common_kernel.h#L486
// 2) RCCL:
// https://github.com/ROCmSoftwarePlatform/rccl/blob/5c8380ff5b5925cae4bce00b1879a5f930226e8d/src/collectives/device/common_kernel.h#L276
inline __device__ void StoreULong2(ulong2 *p, ulong2 &v) {
#if defined(__HIP_PLATFORM_HCC__) || defined(__HCC__) || defined(__HIPCC__)
p->x = v.x;
p->y = v.y;
#else
asm volatile("st.volatile.global.v2.u64 [%0], {%1,%2};" ::"l"(p), "l"(v.x), "l"(v.y) : "memory");
#endif
}
// Fetch data from source memory into register first, and then write them to target memory
// Stride set to thread block size to best utilize cache
__global__ void SMCopyKernel(ulong2 *tgt, const ulong2 *src) {
uint64_t index = blockIdx.x * blockDim.x * NUM_LOOP_UNROLL + threadIdx.x;
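// Each thread copies NUM_LOOP_UNROLL ulong2 values spaced blockDim.x apart, so one block
// moves NUM_LOOP_UNROLL * NUM_THREADS_IN_BLOCK ulong2 elements; this matches the data size
// check in RunCopy.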
ulong2 val[NUM_LOOP_UNROLL];
#pragma unroll
for (uint64_t i = 0; i < NUM_LOOP_UNROLL; i++)
FetchULong2(val[i], src + index + i * blockDim.x);
#pragma unroll
for (uint64_t i = 0; i < NUM_LOOP_UNROLL; i++)
StoreULong2(tgt + index + i * blockDim.x, val[i]);
}
// Print result tag as <src_dev>_to_<dst_dev>_by_<worker_dev>_using_<sm|dma>_under_<numa_node>.
void PrintResultTag(const BenchArgs &args) {
if (args.is_src_dev_gpu) {
printf("gpu%d", args.src_gpu_id);
} else {
printf("cpu");
}
printf("_to_");
if (args.is_dst_dev_gpu) {
printf("gpu%d", args.dst_gpu_id);
} else {
printf("cpu");
}
printf("_by_gpu%d_using_%s_under_numa%lu", args.worker_gpu_id, args.is_sm_copy ? "sm" : "dma", args.numa_id);
}
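// Illustrative sketch only: a helper that mirrors the bandwidth expression printed at the
// end of RunCopy below (GB/s, i.e. total bytes moved divided by elapsed seconds and 1e9).
// It is not called elsewhere in this file.
inline double CalcBandwidthGBps(uint64_t size_in_bytes, uint64_t num_loops, double time_in_sec) {
// total bytes = bytes per loop * number of loops; 1e9 converts bytes/s to GB/s
return static_cast<double>(size_in_bytes) * num_loops / time_in_sec / 1e9;
}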
// Run copy benchmark.
int RunCopy(const BenchArgs &args, const Buffers &buffers) {
cudaError_t cuda_err = cudaSuccess;
cudaStream_t stream;
uint64_t num_thread_blocks;
// Set to worker device
if (SetGpu(args.worker_gpu_id)) {
return -1;
}
// Validate data size for SM copy
if (args.is_sm_copy) {
uint64_t num_elements_in_thread_block = NUM_LOOP_UNROLL * NUM_THREADS_IN_BLOCK;
uint64_t num_bytes_in_thread_block = num_elements_in_thread_block * sizeof(ulong2);
if (args.size % num_bytes_in_thread_block) {
fprintf(stderr, "RunCopy: Data size should be multiple of %lu\n", num_bytes_in_thread_block);
return -1;
}
num_thread_blocks = args.size / num_bytes_in_thread_block;
}
// Create stream to launch kernels
cuda_err = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "RunCopy::cudaStreamCreate error: %d\n", cuda_err);
return -1;
}
// Launch jobs and collect running time
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < args.num_loops; i++) {
if (args.is_sm_copy) {
SMCopyKernel<<<num_thread_blocks, NUM_THREADS_IN_BLOCK, 0, stream>>>(
reinterpret_cast<ulong2 *>(buffers.dst_dev_gpu_buf_ptr),
reinterpret_cast<ulong2 *>(buffers.src_dev_gpu_buf_ptr));
} else {
cudaMemcpyAsync(buffers.dst_dev_gpu_buf_ptr, buffers.src_dev_gpu_buf_ptr, args.size, cudaMemcpyDefault,
stream);
}
}
cuda_err = cudaStreamSynchronize(stream);
auto end = std::chrono::steady_clock::now();
if (cuda_err != cudaSuccess) {
fprintf(stderr, "RunCopy::cudaStreamSynchronize error: %d\n", cuda_err);
return -1;
}
// Destroy stream
cuda_err = cudaStreamDestroy(stream);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "RunCopy::cudaStreamDestroy error: %d\n", cuda_err);
return -1;
}
// Calculate and display bandwidth if no problem
double time_in_sec = std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count();
PrintResultTag(args);
printf(" %g\n", args.size * args.num_loops / time_in_sec / 1e9);
return 0;
}
// Enable peer access between a GPU pair. Return whether succeeds.
int EnablePeerAccess(int src_gpu_id, int dst_gpu_id, int *can_access) {
cudaError_t cuda_err = cudaSuccess;
if (src_gpu_id == dst_gpu_id) {
*can_access = 1;
return 0;
}
cuda_err = cudaDeviceCanAccessPeer(can_access, src_gpu_id, dst_gpu_id);
if (cuda_err != cudaSuccess) {
fprintf(stderr, "EnablePeerAccess::cudaDeviceCanAccessPeer error: %d\n", cuda_err);
return -1;
}
if (*can_access) {
if (SetGpu(src_gpu_id)) {
return -1;
}
cuda_err = cudaDeviceEnablePeerAccess(dst_gpu_id, 0);
if (cuda_err != cudaErrorPeerAccessAlreadyEnabled && cuda_err != cudaSuccess) {
fprintf(stderr, "EnablePeerAccess::cudaDeviceEnablePeerAccess error: %d\n", cuda_err);
return -1;
}
}
return 0;
}
int RunBench(const BenchArgs &args) {
int ret = 0;
int destroy_buf_ret = 0;
Buffers buffers;
ret = PrepareBuf(args, &buffers);
if (ret == 0) {
ret = RunCopy(args, buffers);
if (ret == 0) {
ret = CheckBuf(args, buffers);
}
}
destroy_buf_ret = DestroyBuf(args, &buffers);
if (ret == 0) {
ret = destroy_buf_ret;
}
return ret;
}
int main(int argc, char **argv) {
int ret = 0;
int numa_count = 0;
int gpu_count = 0;
Opts opts;
BenchArgs args;
std::vector<BenchArgs> args_list;
int can_access = 0;
ret = ParseOpts(argc, argv, &opts);
if (ret != 0) {
return ret;
}
args.num_loops = opts.num_loops;
args.size = opts.size;
// Get number of NUMA nodes
if (numa_available() < 0) {
fprintf(stderr, "main::numa_available error\n");
return -1;
}
numa_count = numa_num_configured_nodes();
// Get number of GPUs
ret = GetGpuCount(&gpu_count);
if (ret != 0) {
return ret;
}
// Scan all NUMA nodes
for (int i = 0; i < numa_count; i++) {
args.numa_id = i;
// Scan all GPUs
for (int j = 0; j < gpu_count; j++) {
// Host-to-device benchmark
if (opts.htod_enabled) {
args.is_src_dev_gpu = false;
args.is_dst_dev_gpu = true;
args.dst_gpu_id = j;
args.worker_gpu_id = j;
if (opts.sm_copy_enabled) {
args.is_sm_copy = true;
args_list.push_back(args);
}
if (opts.dma_copy_enabled) {
args.is_sm_copy = false;
args_list.push_back(args);
}
}
// Device-to-host benchmark
if (opts.dtoh_enabled) {
args.is_src_dev_gpu = true;
args.src_gpu_id = j;
args.is_dst_dev_gpu = false;
args.worker_gpu_id = j;
if (opts.sm_copy_enabled) {
args.is_sm_copy = true;
args_list.push_back(args);
}
if (opts.dma_copy_enabled) {
args.is_sm_copy = false;
args_list.push_back(args);
}
}
// Device-to-device benchmark
if (opts.dtod_enabled) {
args.is_src_dev_gpu = true;
args.src_gpu_id = j;
args.is_dst_dev_gpu = true;
// Scan all peers
for (int k = 0; k < gpu_count; k++) {
args.dst_gpu_id = k;
// P2P write
ret = EnablePeerAccess(j, k, &can_access);
if (ret != 0) {
return -1;
}
if (can_access) {
args.worker_gpu_id = j;
if (opts.sm_copy_enabled) {
args.is_sm_copy = true;
args_list.push_back(args);
}
if (opts.dma_copy_enabled) {
args.is_sm_copy = false;
args_list.push_back(args);
}
}
if (j == k) {
continue;
}
// P2P read
ret = EnablePeerAccess(k, j, &can_access);
if (ret != 0) {
return -1;
}
if (can_access) {
args.worker_gpu_id = k;
if (opts.sm_copy_enabled) {
args.is_sm_copy = true;
args_list.push_back(args);
}
if (opts.dma_copy_enabled) {
args.is_sm_copy = false;
args_list.push_back(args);
}
}
}
}
}
}
for (const BenchArgs &curr_args : args_list) {
ret = numa_run_on_node(curr_args.numa_id);
if (ret != 0) {
fprintf(stderr, "main::numa_run_on_node error: %d\n", errno);
return -1;
}
ret = RunBench(curr_args);
if (ret != 0) {
return -1;
}
}
return ret;
}
Tensor FFModel::concat(int n,
const Tensor* tensors,
int axis,
const char *name)
{
Concat *cat = new Concat(*this, n, tensors, axis, name);
layers.push_back(cat);
return cat->outputs[0];
}
Concat::Concat(FFModel& model,
int _n, const Tensor* _tensors,
int _axis,
const char* name)
: Op(model, OP_CONCAT, name, _n, _tensors), axis(_axis)
{
  //TODO: switch to using the Legion dim ordering
int num_dim = inputs[0].numDim;
outputs[0].numDim = num_dim;
for (int i = 0; i < num_dim; i++)
outputs[0].adim[i] = inputs[0].adim[i];
for (int i = 1; i < numInputs; i++)
for (int j = 0; j < num_dim; j++) {
if (j != num_dim - 1 - axis)
assert(inputs[i].adim[j] == outputs[0].adim[j]);
else
outputs[0].adim[j] += inputs[i].adim[j];
}
numOutputs = 1;
numWeights = 0;
}
void Concat::create_weights(FFModel& model)
{
  // Do nothing
}
void Concat::create_output_and_partition(FFModel& model)
{
  // Retrieve the task index space for the op
std::string pcname = name;
task_is = model.get_or_create_task_is(inputs[0].numDim, pcname);
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
int dims[MAX_TENSOR_DIM], num_dim = inputs[0].numDim;
assert(num_dim == domain.get_dim());
for (int i = 0; i < num_dim; i++)
dims[i] = inputs[0].adim[num_dim-1-i];
for (int i = 1; i < numInputs; i++)
for (int j = 0; j < num_dim; j++) {
if (j != axis)
assert(inputs[i].adim[num_dim-1-j] == dims[j]);
else
dims[j] += inputs[i].adim[num_dim-1-j];
}
//for (int i = 0; i < num_dim; i++)
//printf("concat: dim[%d] = %d\n", i, dims[i]);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> part_rect = domain; \
outputs[0] = model.create_tensor<DIM>(dims, DT_FLOAT, this); \
outputs[0].owner_op = this; \
outputs[0].owner_idx = 0; \
for (int i = 0; i < numInputs; i++) { \
Rect<DIM> input_rect = runtime->get_index_partition_color_space( \
ctx, inputs[i].part.get_index_partition()); \
if (input_rect == part_rect) { \
input_lps[i] = inputs[i].part; \
input_grad_lps[i] = inputs[i].part_grad; \
} else { \
model.create_disjoint_partition<DIM>(inputs[i], \
IndexSpaceT<DIM>(task_is), input_lps[i], input_grad_lps[i]); \
} \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
fprintf(stderr, "Unsupported concat dimension number");
assert(false);
}
}
}
void Concat::init_meta(ConcatMeta *m) const
{
m->axis = this->outputs[0].numDim - 1 - this->axis;
}
__host__
OpMeta* Concat::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Concat* cc = (Concat*) task->args;
FFHandler handler = *((const FFHandler*) task->local_args);
ConcatMeta* m = new ConcatMeta(handler);
// Note that our internal axis index ordering is opposite to other frameworks
cc->init_meta(m);
m->profiling = cc->profiling;
return m;
}
void Concat::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
ParallelConfig pc; \
std::string pcname = name; \
ff.config.find_parallel_config(DIM, pcname, pc); \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(CONCAT_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Concat)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(0, FID_DATA);
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[i].region));
launcher.add_field(i + 1, FID_DATA);
}
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[i], 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, inputs[i].region_grad));
launcher.add_field(i + numInputs + 1, FID_DATA);
}
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
meta[idx++] = fm.get_result<OpMeta*>(*it); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
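// Split an N-d rectangle at the concat axis: blk_size is the product of the extents of
// dimensions <= axis (the contiguous chunk handled per block), num_blocks the product of
// the remaining extents.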
template<int N>
void calc_blk_size(coord_t& num_blocks,
coord_t& blk_size,
Rect<N> rect,
int axis)
{
num_blocks = 1;
blk_size = 1;
for (int d = 0; d < N; d++) {
if (d <= axis)
blk_size *= (rect.hi[d] - rect.lo[d] + 1);
else
num_blocks *= (rect.hi[d] - rect.lo[d] + 1);
}
}
/*static*/
void Concat::forward_kernel(float* output,
float const * const *inputs,
int num_inputs,
int axis,
const Domain& out_domain,
const Domain* in_domain,
cudaStream_t stream)
{
coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS];
assert(num_inputs <= MAX_NUM_INPUTS);
switch (out_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = out_domain; \
calc_blk_size<DIM>(num_blocks, output_blk_size, rect, axis); \
for (int i = 0; i < num_inputs; i++) { \
rect = in_domain[i]; \
coord_t input_num_blocks = 1; \
calc_blk_size<DIM>(input_num_blocks, input_blk_sizes[i], rect, axis); \
assert(input_num_blocks == num_blocks); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
fprintf(stderr, "Unsupported concat dimension number");
assert(false);
}
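  // Interleave the inputs into the output: for each input, copy num_blocks chunks of
  // input_blk_sizes[i] elements, spaced output_blk_size apart in the output, then advance
  // the output pointer past this input's chunk.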
for (int i = 0; i < num_inputs; i++) {
copy_with_stride<<<GET_BLOCKS(input_blk_sizes[i]*num_blocks), CUDA_NUM_THREADS, 0, stream>>>(
output, inputs[i], num_blocks, output_blk_size, input_blk_sizes[i]);
//printf("output = %x num_blocks=%d output_blk_size=%d input_blk_size[%d]=%d\n",
// output, num_blocks, output_blk_size, i, input_blk_sizes[i]);
output += input_blk_sizes[i];
}
}
/*
regions[0](O): output
regions[1..numInputs](I): inputs
*/
void Concat::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const Concat* cc = (Concat*) task->args;
// Note that our internal axis index ordering is opposite to other frameworks
int axis = cc->outputs[0].numDim - 1 - cc->axis;
assert(regions.size() == cc->numInputs + 1);
assert(task->regions.size() == cc->numInputs + 1);
Domain out_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
assert(out_domain.get_dim() == cc->outputs[0].numDim);
Domain in_domain[MAX_NUM_INPUTS];
for (int i = 0; i < cc->numInputs; i++)
in_domain[i] = runtime->get_index_space_domain(
ctx, task->regions[i+1].region.get_index_space());
float *output = helperGetTensorPointerWO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
const float *inputs[MAX_NUM_INPUTS];
for (int i = 0; i < cc->numInputs; i++)
inputs[i] = helperGetTensorPointerRO<float>(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
cudaEvent_t t_start, t_end;
if (cc->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start, stream);
}
forward_kernel(output, inputs, cc->numInputs, axis, out_domain, in_domain, stream);
if (cc->profiling) {
cudaEventRecord(t_end, stream);
checkCUDA(cudaEventSynchronize(t_end));
//print_tensor<4, float>(output - output_blk_size, output_rect, "[Concat:forward:output]");
//printf("output_blk_size=%zu\n", output_blk_size);
//print_tensor<4, float>(inputs[0], input_rect[0], "[Concat:forward:input0]");
//print_tensor<4, float>(inputs[1], input_rect[1], "[Concat:forward:input1]");
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
printf("[%s] forward time = %.4f ms\n", cc->name, elapsed);
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
}
}
void Concat::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(CONCAT_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Concat)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(0, FID_DATA);
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[i].region));
launcher.add_field(i + 1, FID_DATA);
}
runtime->execute_index_space(ctx, launcher);
}
void Concat::backward_kernel(const float* output_grad,
float** input_grads,
int num_inputs,
int axis,
const Domain& out_grad_domain,
const Domain* in_grad_domain,
cudaStream_t stream)
{
coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS];
assert(num_inputs <= MAX_NUM_INPUTS);
switch (out_grad_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = out_grad_domain; \
calc_blk_size<DIM>(num_blocks, output_blk_size, rect, axis); \
for (int i = 0; i < num_inputs; i++) { \
rect = in_grad_domain[i]; \
coord_t input_num_blocks = 1; \
calc_blk_size<DIM>(input_num_blocks, input_blk_sizes[i], rect, axis); \
assert(input_num_blocks == num_blocks); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
fprintf(stderr, "Unsupported concat dimension number");
assert(false);
}
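  // Reverse of the forward copy: accumulate (add, not assign) each strided slice of
  // output_grad into the corresponding input gradient, then advance output_grad past
  // this input's chunk.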
for (int i = 0; i < num_inputs; i++) {
add_with_stride<<<GET_BLOCKS(input_blk_sizes[i]*num_blocks), CUDA_NUM_THREADS, 0, stream>>>(
input_grads[i], output_grad, num_blocks, input_blk_sizes[i], output_blk_size);
output_grad += input_blk_sizes[i];
}
//Rect<2> output_rect(Point<2>(0, 0), Point<2>(output_blk_size-1, batch_size - 1));
//Rect<2> input_rect(Point<2>(0, 0), Point<2>(input_blk_sizes[0]-1, batch_size - 1));
//print_tensor<2, float>(output_grad - output_blk_size, output_rect, "[Concat:backward:output]");
//print_tensor<2, float>(input_grads[0], input_rect, "[Concat:backward:input0]");
}
/*
regions[0](I): output_grad
regions[1..numInputs](I/O): input_grad
*/
void Concat::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const Concat* cc = (Concat*) task->args;
// Note that our internal axis index ordering is opposite to other frameworks
int axis = cc->outputs[0].numDim - 1 - cc->axis;
assert(regions.size() == cc->numInputs + 1);
assert(task->regions.size() == cc->numInputs + 1);
assert(cc->numInputs <= MAX_NUM_INPUTS);
Domain out_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
assert(out_grad_domain.get_dim() == cc->outputs[0].numDim);
Domain in_grad_domains[MAX_NUM_INPUTS];
for (int i = 0; i < cc->numInputs; i++)
in_grad_domains[i] = runtime->get_index_space_domain(
ctx, task->regions[i+1].region.get_index_space());
const float *output_grad = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
float *input_grads[MAX_NUM_INPUTS];
for (int i = 0; i < cc->numInputs; i++)
input_grads[i] = helperGetTensorPointerRW<float>(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
cudaEvent_t t_start, t_end;
if (cc->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start, stream);
}
backward_kernel(output_grad, input_grads, cc->numInputs, axis,
out_grad_domain, in_grad_domains, stream);
if (cc->profiling) {
cudaEventRecord(t_end, stream);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
printf("[%s] forward time = %.4f ms\n", cc->name, elapsed);
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
}
}
void Concat::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(CONCAT_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Concat)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(0, FID_DATA);
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[i], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[i].region_grad));
//LogicalRegion lr = inputs[i].region_grad;
//printf("concat[%d]: region(%d,%d,%d)\n", i+1, lr.get_index_space().get_id(), lr.get_field_space().get_id(), lr.get_tree_id());
launcher.add_field(i + 1, FID_DATA);
}
runtime->execute_index_space(ctx, launcher);
}
bool Concat::measure_operator_cost(Simulator* sim,
const ParallelConfig& pc,
CostMetrics& cost_metrics)
{
assert (numInputs <= MAX_NUM_INPUTS);
Tensor sub_inputs[MAX_NUM_INPUTS], sub_output;
if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) {
return false;
}
for (int i = 0; i < numInputs; i++) {
if (!inputs[i].get_input_sub_tensor(pc, sub_inputs[i], op_type)) {
return false;
}
}
ConcatMeta *m = sim->concat_meta;
this->init_meta(m);
sim->free_all();
float *input_ptrs[MAX_NUM_INPUTS];
float *input_grad_ptrs[MAX_NUM_INPUTS];
for (int i = 0; i < numInputs; i++) {
input_ptrs[i] = (float *)sim->allocate(sub_inputs[i].get_volume(), DT_FLOAT);
assert (input_ptrs[i] != NULL);
}
float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert (output_ptr != NULL);
int axis = outputs[0].numDim - 1 - this->axis;
Domain out_domain = sub_output.get_domain();
Domain in_domains[MAX_NUM_INPUTS];
for (int i = 0; i < numInputs; i++) {
in_domains[i] = sub_inputs[i].get_domain();
}
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
std::function<void()> forward, backward;
forward = [&] {
forward_kernel(output_ptr, input_ptrs, numInputs, axis, out_domain, in_domains, stream);
};
if (sim->computationMode == COMP_MODE_TRAINING) {
for (int i = 0; i < numInputs; i++) {
input_grad_ptrs[i] = (float *)sim->allocate(sub_inputs[i].get_volume(), DT_FLOAT);
assert (input_grad_ptrs[i] != NULL);
}
float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert (output_grad_ptr != NULL);
backward = [&] {
backward_kernel(output_grad_ptr, input_grad_ptrs,
numInputs, axis, out_domain, in_domains, stream);
};
}
inner_measure_operator_cost(sim, forward, backward, cost_metrics);
if (sim->computationMode == COMP_MODE_TRAINING) {
printf("[Measure Concat] name(%s) forward_time(%.4lf) backward_time(%.4lf)\n",
name,
cost_metrics.forward_time,
cost_metrics.backward_time);
} else {
printf("[Measure Concat] name(%s) forward_time(%.4lf)\n",
name, cost_metrics.forward_time);
}
return true;
}
using namespace at;
namespace {
constexpr int32_t MAX_PROBES = 3;
enum {
OPTIM_SGD = 0,
OPTIM_ADAGRAD = 1,
OPTIM_DENSE = 2,
};
} // namespace
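// Thin wrapper: batched FP32 GEMM through cublasGemmBatchedEx on the current CUDA stream.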
inline void cuda_gemm_batched_fp32_fp32(
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float* alpha,
void** a_array,
int lda,
void** b_array,
int ldb,
float* beta,
void** c_array,
int ldc,
int batch_count) {
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cublasSetStream(handle, c10::cuda::getCurrentCUDAStream());
cublasGemmBatchedEx(
handle,
transa,
transb,
m,
n,
k,
alpha,
a_array,
CUDA_R_32F,
lda,
b_array,
CUDA_R_32F,
ldb,
beta,
c_array,
CUDA_R_32F,
ldc,
batch_count,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT);
}
__global__ void init_batch_gemm_backward_2T_kernel(
int32_t N,
const int64_t* __restrict__ colidx,
const int64_t* __restrict__ rowidx,
const int64_t* __restrict__ tableidx,
const int64_t* __restrict__ L,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_0,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_1,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_0,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_1,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> d_output,
int32_t* __restrict__ tt_idx,
float** __restrict__ a0_ptr,
float** __restrict__ b0_ptr,
float** __restrict__ c0_ptr,
float** __restrict__ a1_ptr,
float** __restrict__ b1_ptr,
float** __restrict__ c1_ptr) {
int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
auto cidx = __ldg(&colidx[n]);
auto ridx = __ldg(&rowidx[n]);
auto tidx = __ldg(&tableidx[n]);
int32_t tt_idx_0 = cidx / L[0];
cidx = cidx % L[0];
int32_t tt_idx_1 = cidx / L[1];
tt_idx[0 * N + n] = tt_idx_0;
tt_idx[1 * N + n] = tt_idx_1;
float* d_output_ptr = (float*)&(d_output[tidx][ridx][0]);
a0_ptr[0 * N + n] = (float*)&(tt_cores_0[tidx][tt_idx_0][0]);
b0_ptr[0 * N + n] = d_output_ptr;
c0_ptr[0 * N + n] = (float*)&(tr_tt_cores_1[n][0]);
a1_ptr[0 * N + n] = d_output_ptr;
b1_ptr[0 * N + n] = (float*)&(tt_cores_1[tidx][tt_idx_1][0]);
c1_ptr[0 * N + n] = (float*)&(tr_tt_cores_0[n][0]);
}
}
__global__ void init_batch_gemm_backward_3T_kernel(
int32_t N,
const int64_t* __restrict__ colidx,
const int64_t* __restrict__ rowidx,
const int64_t* __restrict__ tableidx,
const int64_t* __restrict__ L,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_0,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_1,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_2,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_0,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_1,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_2,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> d_output,
int32_t* __restrict__ tt_idx,
float** __restrict__ a_ptr,
float** __restrict__ b_ptr,
float** __restrict__ c_ptr,
float** __restrict__ a0_ptr,
float** __restrict__ b0_ptr,
float** __restrict__ c0_ptr,
float** __restrict__ a1_ptr,
float** __restrict__ b1_ptr,
float** __restrict__ c1_ptr) {
int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
auto cidx = __ldg(&colidx[n]);
auto ridx = __ldg(&rowidx[n]);
auto tidx = __ldg(&tableidx[n]);
int32_t tt_idx_0 = cidx / L[0];
cidx = cidx % L[0];
int32_t tt_idx_1 = cidx / L[1];
cidx = cidx % L[1];
int32_t tt_idx_2 = cidx / L[2];
tt_idx[0 * N + n] = tt_idx_0;
tt_idx[1 * N + n] = tt_idx_1;
tt_idx[2 * N + n] = tt_idx_2;
float* tr_0_ptr = (float*)&(tr_0[n][0]);
float* d_output_ptr = (float*)&(d_output[tidx][ridx][0]);
float* tt_cores_0_ptr = (float*)&(tt_cores_0[tidx][tt_idx_0][0]);
float* tt_cores_1_ptr = (float*)&(tt_cores_1[tidx][tt_idx_1][0]);
a_ptr[0 * N + n] = tt_cores_1_ptr;
b_ptr[0 * N + n] = tt_cores_0_ptr;
c_ptr[0 * N + n] = tr_0_ptr;
a0_ptr[0 * N + n] = tt_cores_0_ptr;
b0_ptr[0 * N + n] = tr_0_ptr;
c0_ptr[0 * N + n] = (float*)&(tr_tt_cores_1[n][0]);
a1_ptr[0 * N + n] = tr_0_ptr;
b1_ptr[0 * N + n] = tt_cores_1_ptr;
c1_ptr[0 * N + n] = (float*)&(tr_tt_cores_0[n][0]);
a0_ptr[1 * N + n] = tr_0_ptr;
b0_ptr[1 * N + n] = d_output_ptr;
c0_ptr[1 * N + n] = (float*)&(tr_tt_cores_2[n][0]);
a1_ptr[1 * N + n] = d_output_ptr;
b1_ptr[1 * N + n] = (float*)&(tt_cores_2[tidx][tt_idx_2][0]);
c1_ptr[1 * N + n] = tr_0_ptr;
}
}
__global__ void init_batch_gemm_backward_4T_kernel(
int32_t N,
const int64_t* __restrict__ colidx,
const int64_t* __restrict__ rowidx,
const int64_t* __restrict__ tableidx,
const int64_t* __restrict__ L,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_0,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_1,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_2,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_3,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_0,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_1,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_2,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores_3,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_1,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> d_output,
int32_t* __restrict__ tt_idx,
float** __restrict__ a_ptr,
float** __restrict__ b_ptr,
float** __restrict__ c_ptr,
float** __restrict__ a0_ptr,
float** __restrict__ b0_ptr,
float** __restrict__ c0_ptr,
float** __restrict__ a1_ptr,
float** __restrict__ b1_ptr,
float** __restrict__ c1_ptr) {
int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
auto cidx = __ldg(&colidx[n]);
auto ridx = __ldg(&rowidx[n]);
auto tidx = __ldg(&tableidx[n]);
int32_t tt_idx_0 = cidx / L[0];
cidx = cidx % L[0];
int32_t tt_idx_1 = cidx / L[1];
cidx = cidx % L[1];
int32_t tt_idx_2 = cidx / L[2];
cidx = cidx % L[2];
int32_t tt_idx_3 = cidx / L[3];
tt_idx[0 * N + n] = tt_idx_0;
tt_idx[1 * N + n] = tt_idx_1;
tt_idx[2 * N + n] = tt_idx_2;
tt_idx[3 * N + n] = tt_idx_3;
float* tr_0_ptr = (float*)&(tr_0[n][0]);
float* tr_1_ptr = (float*)&(tr_1[n][0]);
float* d_output_ptr = (float*)&(d_output[tidx][ridx][0]);
float* tt_cores_0_ptr = (float*)&(tt_cores_0[tidx][tt_idx_0][0]);
float* tt_cores_1_ptr = (float*)&(tt_cores_1[tidx][tt_idx_1][0]);
float* tt_cores_2_ptr = (float*)&(tt_cores_2[tidx][tt_idx_2][0]);
a_ptr[0 * N + n] = tt_cores_1_ptr;
b_ptr[0 * N + n] = tt_cores_0_ptr;
c_ptr[0 * N + n] = tr_0_ptr;
a_ptr[1 * N + n] = tt_cores_2_ptr;
b_ptr[1 * N + n] = tr_0_ptr;
c_ptr[1 * N + n] = tr_1_ptr;
a0_ptr[0 * N + n] = tt_cores_0_ptr;
b0_ptr[0 * N + n] = tr_0_ptr;
c0_ptr[0 * N + n] = (float*)&(tr_tt_cores_1[n][0]);
a1_ptr[0 * N + n] = b0_ptr[0 * N + n];
b1_ptr[0 * N + n] = tt_cores_1_ptr;
c1_ptr[0 * N + n] = (float*)&(tr_tt_cores_0[n][0]);
a0_ptr[1 * N + n] = tr_0_ptr;
b0_ptr[1 * N + n] = tr_1_ptr;
c0_ptr[1 * N + n] = (float*)&(tr_tt_cores_2[n][0]);
a1_ptr[1 * N + n] = b0_ptr[1 * N + n];
b1_ptr[1 * N + n] = tt_cores_2_ptr;
c1_ptr[1 * N + n] = tr_0_ptr;
a0_ptr[2 * N + n] = tr_1_ptr;
b0_ptr[2 * N + n] = d_output_ptr;
c0_ptr[2 * N + n] = (float*)&(tr_tt_cores_3[n][0]);
a1_ptr[2 * N + n] = d_output_ptr;
b1_ptr[2 * N + n] = (float*)&(tt_cores_3[tidx][tt_idx[3 * N + n]][0]);
c1_ptr[2 * N + n] = tr_1_ptr;
}
}
void init_batch_gemm_backward_cuda(
int32_t T,
int32_t N,
const int64_t* __restrict__ colidx,
const int64_t* __restrict__ rowidx,
const int64_t* __restrict__ tableidx,
const int64_t* __restrict__ L,
const std::vector<Tensor>& tt_cores,
const std::vector<Tensor>& tr_tt_cores,
const std::vector<Tensor>& tr,
Tensor d_output,
int32_t* __restrict__ tt_idx,
float** __restrict__ a_ptr,
float** __restrict__ b_ptr,
float** __restrict__ c_ptr,
float** __restrict__ a0_ptr,
float** __restrict__ b0_ptr,
float** __restrict__ c0_ptr,
float** __restrict__ a1_ptr,
float** __restrict__ b1_ptr,
float** __restrict__ c1_ptr) {
int32_t threads = (N > 256 ? 256 : 32);
int32_t num_blocks = (N + threads - 1) / threads;
if (T == 2) {
init_batch_gemm_backward_2T_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
colidx,
rowidx,
tableidx,
L,
tt_cores[0].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[1].packed_accessor32<float, 3, RestrictPtrTraits>(),
tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr_tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(),
d_output.packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_idx,
a0_ptr,
b0_ptr,
c0_ptr,
a1_ptr,
b1_ptr,
c1_ptr);
} else if (T == 3) {
init_batch_gemm_backward_3T_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
colidx,
rowidx,
tableidx,
L,
tt_cores[0].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[1].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[2].packed_accessor32<float, 3, RestrictPtrTraits>(),
tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr_tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr_tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(),
d_output.packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_idx,
a_ptr,
b_ptr,
c_ptr,
a0_ptr,
b0_ptr,
c0_ptr,
a1_ptr,
b1_ptr,
c1_ptr);
} else if (T == 4) {
init_batch_gemm_backward_4T_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
colidx,
rowidx,
tableidx,
L,
tt_cores[0].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[1].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[2].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[3].packed_accessor32<float, 3, RestrictPtrTraits>(),
tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr_tt_cores[1].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr_tt_cores[2].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr_tt_cores[3].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr[1].packed_accessor32<float, 2, RestrictPtrTraits>(),
d_output.packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_idx,
a_ptr,
b_ptr,
c_ptr,
a0_ptr,
b0_ptr,
c0_ptr,
a1_ptr,
b1_ptr,
c1_ptr);
}
}
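// Scatter-accumulate the per-sample core gradients (tr_tt_cores) into the dense d_tt_cores
// tensor; atomicAdd is required because several samples may update the same (table, slice) entry.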
__global__ void update_d_tt_cores_kernel(
int32_t N,
int32_t D,
const int32_t* __restrict__ tt_idx,
const int64_t* __restrict__ tableidx,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_tt_cores,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> d_tt_cores) {
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n < N) {
auto idx = __ldg(&tt_idx[n]);
auto tidx = __ldg(&tableidx[n]);
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
atomicAdd(&(d_tt_cores[tidx][idx][d]), tr_tt_cores[n][d]);
}
}
}
__global__ void update_tt_cores_sgd_kernel(
int32_t B,
int32_t D,
int32_t num_tables,
float learning_rate,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> d_tt_cores,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores) {
int32_t b = blockIdx.x * blockDim.y + threadIdx.y;
if (b >= B) {
return;
}
for (int32_t i = 0; i < num_tables; i++) {
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
tt_cores[i][b][d] -= learning_rate * d_tt_cores[i][b][d];
}
}
}
__global__ void update_tt_cores_adagrad_kernel(
int32_t B,
int32_t D,
int32_t num_tables,
float learning_rate,
float eps,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> d_tt_cores,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> optimizer_state,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores) {
int32_t b = blockIdx.x * blockDim.y + threadIdx.y;
if (b >= B) {
return;
}
for (int32_t i = 0; i < num_tables; i++) {
for (int32_t d = threadIdx.x; d < D; d += blockDim.x) {
optimizer_state[i][b][d] += d_tt_cores[i][b][d] * d_tt_cores[i][b][d];
tt_cores[i][b][d] -= learning_rate * d_tt_cores[i][b][d] /
(sqrt(optimizer_state[i][b][d]) + eps);
}
}
}
std::vector<Tensor> tt_embeddings_backward_cuda(
int32_t optim,
int32_t batch_count,
int32_t D,
float learning_rate,
float eps,
const std::vector<int32_t>& tt_p_shapes,
const std::vector<int32_t>& tt_q_shapes,
const std::vector<int32_t>& tt_ranks,
Tensor L,
int32_t nnz,
Tensor colidx,
Tensor rowidx,
Tensor tableidx,
Tensor d_output,
c10::optional<std::vector<Tensor>> optimizer_state,
std::vector<Tensor>& tt_cores) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(d_output.get_device());
int32_t T = tt_p_shapes.size();
int32_t num_tables = tt_cores[0].size(0);
std::vector<Tensor> d_tt_cores;
std::vector<Tensor> tr_tt_cores;
for (int32_t t = 0; t < T; ++t) {
d_tt_cores.push_back(at::zeros_like(tt_cores[t]));
tr_tt_cores.push_back(
at::empty({batch_count, tt_cores[t].size(2)}, tt_cores[t].options()));
}
if (nnz == 0) {
return d_tt_cores;
}
// batch gemm parameters
std::vector<int32_t> m(T - 1);
std::vector<int32_t> n(T - 1);
std::vector<int32_t> k(T - 1);
float alpha = 1.0;
float beta = 0.0;
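  // GEMM shapes for the TT contraction chain: at step t the accumulated left factor has
  // m[t] = q_0 * ... * q_t rows and k[t] = r_{t+1} columns, and the next core contributes
  // n[t] = q_{t+1} * r_{t+2} columns.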
int32_t m_ = tt_q_shapes[0];
for (int32_t t = 0; t < T - 1; ++t) {
m[t] = m_;
k[t] = tt_ranks[t + 1];
n[t] = tt_q_shapes[t + 1] * tt_ranks[t + 2];
m_ = m_ * tt_q_shapes[t + 1];
}
  // allocate the intermediate buffers
std::vector<Tensor> tr;
int64_t tr_size = tt_q_shapes[0] * tt_ranks[1];
for (int32_t t = 0; t < T - 2; ++t) {
tr_size = tr_size * tt_q_shapes[t + 1] * tt_ranks[t + 2] / tt_ranks[t + 1];
tr.push_back(at::empty({batch_count, tr_size}, tt_cores[0].options()));
}
auto tt_idx =
at::empty({T * batch_count}, tt_cores[0].options().dtype(at::kInt));
auto a_ptr_tensor = at::empty(
{(T - 2) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto b_ptr_tensor = at::empty(
{(T - 2) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto c_ptr_tensor = at::empty(
{(T - 2) * batch_count}, tt_cores[0].options().dtype(at::kLong));
float** a_ptr = (float**)a_ptr_tensor.data_ptr<int64_t>();
float** b_ptr = (float**)b_ptr_tensor.data_ptr<int64_t>();
float** c_ptr = (float**)c_ptr_tensor.data_ptr<int64_t>();
auto a0_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto b0_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto c0_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
float** a0_ptr = (float**)a0_ptr_tensor.data_ptr<int64_t>();
float** b0_ptr = (float**)b0_ptr_tensor.data_ptr<int64_t>();
float** c0_ptr = (float**)c0_ptr_tensor.data_ptr<int64_t>();
auto a1_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto b1_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto c1_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
float** a1_ptr = (float**)a1_ptr_tensor.data_ptr<int64_t>();
float** b1_ptr = (float**)b1_ptr_tensor.data_ptr<int64_t>();
float** c1_ptr = (float**)c1_ptr_tensor.data_ptr<int64_t>();
for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) {
int32_t end_idx =
start_idx + batch_count < nnz ? start_idx + batch_count : nnz;
int32_t N = end_idx - start_idx;
init_batch_gemm_backward_cuda(
T,
N,
&(colidx.data_ptr<int64_t>()[start_idx]),
&(rowidx.data_ptr<int64_t>()[start_idx]),
&(tableidx.data_ptr<int64_t>()[start_idx]),
L.data_ptr<int64_t>(),
tt_cores,
tr_tt_cores,
tr,
d_output,
tt_idx.data_ptr<int32_t>(),
a_ptr,
b_ptr,
c_ptr,
a0_ptr,
b0_ptr,
c0_ptr,
a1_ptr,
b1_ptr,
c1_ptr);
// recompute forward
for (int32_t t = 0; t < T - 2; ++t) {
cuda_gemm_batched_fp32_fp32(
CUBLAS_OP_N,
CUBLAS_OP_N,
n[t],
m[t],
k[t],
&alpha,
(void**)&(a_ptr[t * N]),
n[t],
(void**)&(b_ptr[t * N]),
k[t],
&beta,
(void**)&(c_ptr[t * N]),
n[t],
N);
} // for (int32_t t = 0; t < T - 2; ++t)
// backward propagation
for (int32_t t = T - 2; t >= 0; --t) {
cuda_gemm_batched_fp32_fp32(
CUBLAS_OP_N,
CUBLAS_OP_T,
n[t],
k[t],
m[t],
&alpha,
(void**)&(b0_ptr[t * N]),
n[t],
(void**)&(a0_ptr[t * N]),
k[t],
&beta,
(void**)&(c0_ptr[t * N]),
n[t],
N);
int32_t D_0 = tt_cores[t + 1].size(2);
int32_t tx_0 = std::min(1024, D_0);
int32_t ty_0 = 1024 / tx_0;
update_d_tt_cores_kernel<<<
div_round_up(N, ty_0),
dim3(tx_0, ty_0),
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
D_0,
&(tt_idx.data_ptr<int32_t>()[(t + 1) * N]),
&(tableidx.data_ptr<int64_t>()[start_idx]),
tr_tt_cores[t + 1].packed_accessor32<float, 2, RestrictPtrTraits>(),
d_tt_cores[t + 1].packed_accessor32<float, 3, RestrictPtrTraits>());
cuda_gemm_batched_fp32_fp32(
CUBLAS_OP_T,
CUBLAS_OP_N,
k[t],
m[t],
n[t],
&alpha,
(void**)&(b1_ptr[t * N]),
n[t],
(void**)&(a1_ptr[t * N]),
n[t],
&beta,
(void**)&(c1_ptr[t * N]),
k[t],
N);
if (t == 0) {
int32_t D_1 = tt_cores[0].size(2);
int32_t tx_1 = std::min(1024, D_1);
int32_t ty_1 = 1024 / tx_1;
update_d_tt_cores_kernel<<<
div_round_up(N, ty_1),
dim3(tx_1, ty_1),
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
D_1,
&(tt_idx.data_ptr<int32_t>()[t * N]),
&(tableidx.data_ptr<int64_t>()[start_idx]),
tr_tt_cores[0].packed_accessor32<float, 2, RestrictPtrTraits>(),
d_tt_cores[0].packed_accessor32<float, 3, RestrictPtrTraits>());
}
} // for (int32_t t = T - 2; t >=0 ; --t)
} // for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count)
if (optim == OPTIM_ADAGRAD) {
for (int32_t t = 0; t < T; ++t) {
int32_t y_size = tt_cores[t].size(1);
int32_t x_size = tt_cores[t].size(2);
int32_t tx = std::min(1024, y_size);
int32_t ty = 1024 / tx;
update_tt_cores_adagrad_kernel<<<
div_round_up(x_size, ty),
dim3(tx, ty),
0,
c10::cuda::getCurrentCUDAStream()>>>(
y_size,
x_size,
num_tables,
learning_rate,
eps,
d_tt_cores[t].packed_accessor32<float, 3, RestrictPtrTraits>(),
(*optimizer_state)[t]
.packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[t].packed_accessor32<float, 3, RestrictPtrTraits>());
}
} else if (optim == OPTIM_SGD) {
for (int32_t t = 0; t < T; ++t) {
int32_t y_size = tt_cores[t].size(1);
int32_t x_size = tt_cores[t].size(2);
int32_t tx = std::min(1024, y_size);
int32_t ty = 1024 / tx;
update_tt_cores_sgd_kernel<<<
div_round_up(x_size, ty),
dim3(tx, ty),
0,
c10::cuda::getCurrentCUDAStream()>>>(
y_size,
x_size,
num_tables,
learning_rate,
d_tt_cores[t].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[t].packed_accessor32<float, 3, RestrictPtrTraits>());
}
}
return d_tt_cores;
}
std::vector<Tensor> tt_embeddings_backward_dense_cuda(
int32_t batch_count,
int32_t D,
const std::vector<int32_t>& tt_p_shapes,
const std::vector<int32_t>& tt_q_shapes,
const std::vector<int32_t>& tt_ranks,
Tensor L,
int32_t nnz,
Tensor colidx,
Tensor rowidx,
Tensor tableidx,
Tensor d_output,
std::vector<Tensor>& tt_cores) {
return tt_embeddings_backward_cuda(
OPTIM_DENSE,
batch_count,
D,
0.0,
0.0,
tt_p_shapes,
tt_q_shapes,
tt_ranks,
L,
nnz,
colidx,
rowidx,
tableidx,
d_output,
c10::nullopt,
tt_cores);
}
void tt_embeddings_backward_sgd_cuda(
int32_t batch_count,
int32_t D,
float learning_rate,
const std::vector<int32_t>& tt_p_shapes,
const std::vector<int32_t>& tt_q_shapes,
const std::vector<int32_t>& tt_ranks,
Tensor L,
int32_t nnz,
Tensor colidx,
Tensor rowidx,
Tensor tableidx,
Tensor d_output,
std::vector<Tensor>& tt_cores) {
tt_embeddings_backward_cuda(
OPTIM_SGD,
batch_count,
D,
learning_rate,
0.0,
tt_p_shapes,
tt_q_shapes,
tt_ranks,
L,
nnz,
colidx,
rowidx,
tableidx,
d_output,
c10::nullopt,
tt_cores);
}
void tt_embeddings_backward_adagrad_cuda(
int32_t batch_count,
int32_t D,
float learning_rate,
float eps,
const std::vector<int32_t>& tt_p_shapes,
const std::vector<int32_t>& tt_q_shapes,
const std::vector<int32_t>& tt_ranks,
Tensor L,
int32_t nnz,
Tensor colidx,
Tensor rowidx,
Tensor tableidx,
Tensor d_output,
std::vector<Tensor>& optimizer_state,
std::vector<Tensor>& tt_cores) {
tt_embeddings_backward_cuda(
OPTIM_ADAGRAD,
batch_count,
D,
learning_rate,
eps,
tt_p_shapes,
tt_q_shapes,
tt_ranks,
L,
nnz,
colidx,
rowidx,
tableidx,
d_output,
optimizer_state,
tt_cores);
}
__global__ void init_batch_gemm_forward_2T_kernel(
int N,
const int64_t* __restrict__ L,
const int64_t* __restrict__ colidx,
const int64_t* __restrict__ tableidx,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_0,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_1,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0,
float** __restrict__ a_ptr,
float** __restrict__ b_ptr,
float** __restrict__ c_ptr) {
int32_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
auto tidx = __ldg(&tableidx[n]);
auto cidx = __ldg(&colidx[n]);
auto tt_idx_0 = cidx / L[0];
cidx = cidx % L[0];
auto tt_idx_1 = cidx / L[1];
a_ptr[0 * N + n] = (float*)&(tt_cores_1[tidx][tt_idx_1][0]);
b_ptr[0 * N + n] = (float*)&(tt_cores_0[tidx][tt_idx_0][0]);
c_ptr[0 * N + n] = (float*)&(tr_0[n][0]);
}
}
__global__ void init_batch_gemm_forward_3T_kernel(
int N,
const int64_t* __restrict__ L,
const int64_t* __restrict__ colidx,
const int64_t* __restrict__ tableidx,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_0,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_1,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_2,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_1,
float** __restrict__ a_ptr,
float** __restrict__ b_ptr,
float** __restrict__ c_ptr) {
int32_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
auto tidx = __ldg(&tableidx[n]);
auto cidx = __ldg(&colidx[n]);
auto tt_idx_0 = cidx / L[0];
cidx = cidx % L[0];
auto tt_idx_1 = cidx / L[1];
cidx = cidx % L[1];
auto tt_idx_2 = cidx / L[2];
float* tr_0_ptr = (float*)&(tr_0[n][0]);
a_ptr[0 * N + n] = (float*)&(tt_cores_1[tidx][tt_idx_1][0]);
b_ptr[0 * N + n] = (float*)&(tt_cores_0[tidx][tt_idx_0][0]);
c_ptr[0 * N + n] = tr_0_ptr;
a_ptr[1 * N + n] = (float*)&(tt_cores_2[tidx][tt_idx_2][0]);
b_ptr[1 * N + n] = tr_0_ptr;
c_ptr[1 * N + n] = (float*)&(tr_1[n][0]);
}
}
__global__ void init_batch_gemm_forward_4T_kernel(
int N,
const int64_t* __restrict__ L,
const int64_t* __restrict__ colidx,
const int64_t* __restrict__ tableidx,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_0,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_1,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_2,
PackedTensorAccessor32<float, 3, RestrictPtrTraits> tt_cores_3,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_0,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_1,
PackedTensorAccessor32<float, 2, RestrictPtrTraits> tr_2,
float** __restrict__ a_ptr,
float** __restrict__ b_ptr,
float** __restrict__ c_ptr) {
int32_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
auto tidx = __ldg(&tableidx[n]);
auto cidx = __ldg(&colidx[n]);
auto tt_idx_0 = cidx / L[0];
cidx = cidx % L[0];
auto tt_idx_1 = cidx / L[1];
cidx = cidx % L[1];
auto tt_idx_2 = cidx / L[2];
cidx = cidx % L[2];
auto tt_idx_3 = cidx / L[3];
float* tr_0_ptr = (float*)&(tr_0[n][0]);
float* tr_1_ptr = (float*)&(tr_1[n][0]);
a_ptr[0 * N + n] = (float*)&(tt_cores_1[tidx][tt_idx_1][0]);
b_ptr[0 * N + n] = (float*)&(tt_cores_0[tidx][tt_idx_0][0]);
c_ptr[0 * N + n] = tr_0_ptr;
a_ptr[1 * N + n] = (float*)&(tt_cores_2[tidx][tt_idx_2][0]);
b_ptr[1 * N + n] = tr_0_ptr;
c_ptr[1 * N + n] = tr_1_ptr;
a_ptr[2 * N + n] = (float*)&(tt_cores_3[tidx][tt_idx_3][0]);
b_ptr[2 * N + n] = tr_1_ptr;
c_ptr[2 * N + n] = (float*)&(tr_2[n][0]);
}
}
void init_batch_gemm_forward_cuda(
int32_t T,
int32_t N,
const int64_t* __restrict__ L,
const int64_t* __restrict__ colidx,
const int64_t* __restrict__ tableidx,
const std::vector<Tensor>& tt_cores,
const std::vector<Tensor>& tr,
float** __restrict__ a_ptr,
float** __restrict__ b_ptr,
float** __restrict__ c_ptr) {
int32_t threads = (N > 256 ? 256 : 32);
int32_t num_blocks = (N + threads - 1) / threads;
if (T == 2) {
init_batch_gemm_forward_2T_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
L,
colidx,
tableidx,
tt_cores[0].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[1].packed_accessor32<float, 3, RestrictPtrTraits>(),
tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(),
a_ptr,
b_ptr,
c_ptr);
} else if (T == 3) {
init_batch_gemm_forward_3T_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
L,
colidx,
tableidx,
tt_cores[0].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[1].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[2].packed_accessor32<float, 3, RestrictPtrTraits>(),
tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr[1].packed_accessor32<float, 2, RestrictPtrTraits>(),
a_ptr,
b_ptr,
c_ptr);
} else if (T == 4) {
init_batch_gemm_forward_4T_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
L,
colidx,
tableidx,
tt_cores[0].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[1].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[2].packed_accessor32<float, 3, RestrictPtrTraits>(),
tt_cores[3].packed_accessor32<float, 3, RestrictPtrTraits>(),
tr[0].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr[1].packed_accessor32<float, 2, RestrictPtrTraits>(),
tr[2].packed_accessor32<float, 2, RestrictPtrTraits>(),
a_ptr,
b_ptr,
c_ptr);
}
}
__global__ void reduce_output_kernel(
int32_t N,
int32_t B,
int32_t D,
const int64_t* __restrict__ rowidx,
const int64_t* __restrict__ tableidx,
const float* __restrict__ tr_last,
float* __restrict__ output) {
int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y;
if (indice_id >= N) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id] ||
tableidx[indice_id - 1] != tableidx[indice_id]);
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
int64_t row_index = rowidx[indice_id];
int64_t table_index = tableidx[indice_id];
// now, find the end of the segment (and thus the segment length `SL`).
int32_t SL = 1;
while (indice_id + SL < N && rowidx[indice_id + SL] == row_index &&
tableidx[indice_id + SL] == table_index) {
SL += 1;
}
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<float> sum(&output[table_index * B * D + row_index * D + d * 4]);
for (int32_t sl = 0; sl < SL; ++sl) {
Vec4T<float> tr(&tr_last[(indice_id + sl) * D + d * 4]);
sum.acc.x += tr.acc.x;
sum.acc.y += tr.acc.y;
sum.acc.z += tr.acc.z;
sum.acc.w += tr.acc.w;
}
sum.store(&output[table_index * B * D + row_index * D + d * 4]);
}
}
Tensor tt_embeddings_forward_cuda(
int32_t batch_count,
int32_t num_tables,
int32_t B,
int32_t D,
const std::vector<int>& tt_p_shapes,
const std::vector<int>& tt_q_shapes,
const std::vector<int>& tt_ranks,
Tensor L,
int32_t nnz,
Tensor colidx,
Tensor rowidx,
Tensor tableidx,
const std::vector<Tensor>& tt_cores) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(rowidx.get_device());
int32_t T = tt_p_shapes.size();
auto output =
at::zeros({num_tables, B, D}, tt_cores[0].options().dtype(at::kFloat));
if (nnz == 0) {
return output;
}
TORCH_CHECK(batch_count > 0);
TORCH_CHECK(D > 0);
TORCH_CHECK(D % 4 == 0);
TORCH_CHECK(T > 0);
// batch gemm parameters
std::vector<int32_t> m(T - 1);
std::vector<int32_t> n(T - 1);
std::vector<int32_t> k(T - 1);
float alpha = 1.0;
float beta = 0.0;
int32_t m_ = tt_q_shapes[0];
for (int32_t t = 0; t < T - 1; ++t) {
m[t] = m_;
k[t] = tt_ranks[t + 1];
n[t] = tt_q_shapes[t + 1] * tt_ranks[t + 2];
m_ = m_ * tt_q_shapes[t + 1];
}
  // allocate the intermediate buffers
std::vector<Tensor> tr;
int32_t tr_size = tt_q_shapes[0] * tt_ranks[1];
for (int32_t t = 0; t < T - 1; ++t) {
tr_size = tr_size * tt_q_shapes[t + 1] * tt_ranks[t + 2] / tt_ranks[t + 1];
tr.push_back(at::empty(
{batch_count, tr_size}, tt_cores[0].options().dtype(at::kFloat)));
}
auto a_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto b_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto c_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
float** a_ptr = (float**)a_ptr_tensor.data_ptr<int64_t>();
float** b_ptr = (float**)b_ptr_tensor.data_ptr<int64_t>();
float** c_ptr = (float**)c_ptr_tensor.data_ptr<int64_t>();
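  // Process the nonzeros in chunks of batch_count: gather per-sample pointers for the chunk,
  // run the T-1 batched GEMMs to materialize the embedding rows, then segment-reduce the
  // last intermediate buffer into the pooled output.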
for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) {
int32_t end_idx =
start_idx + batch_count < nnz ? start_idx + batch_count : nnz;
int32_t N = end_idx - start_idx;
init_batch_gemm_forward_cuda(
T,
N,
L.data_ptr<int64_t>(),
&(colidx.data_ptr<int64_t>()[start_idx]),
&(tableidx.data_ptr<int64_t>()[start_idx]),
tt_cores,
tr,
a_ptr,
b_ptr,
c_ptr);
// batched GEMM
for (int32_t t = 0; t < T - 1; ++t) {
cuda_gemm_batched_fp32_fp32(
CUBLAS_OP_N,
CUBLAS_OP_N,
n[t],
m[t],
k[t],
&alpha,
(void**)&(a_ptr[t * N]),
n[t],
(void**)&(b_ptr[t * N]),
k[t],
&beta,
(void**)&(c_ptr[t * N]),
n[t],
N);
}
int32_t tx = kWarpSize;
int32_t ty = 1024 / tx;
dim3 threads(tx, ty);
int32_t num_blocks = (N + ty - 1) / ty;
reduce_output_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
B,
D,
&(rowidx.data_ptr<int64_t>()[start_idx]),
&(tableidx.data_ptr<int64_t>()[start_idx]),
tr[T - 2].data_ptr<float>(),
output.data_ptr<float>());
} // for (int start_idx = 0; start_idx < nnz; start_idx += batch_count)
return output;
}
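// Cache statistics: insert each accessed column index into the hash table (at most MAX_PROBES
// probes) and bump its access count; cache_populate_cuda later promotes the most frequent rows.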
__global__ void update_cache_state_kernel(
int N,
const int64_t* __restrict__ colidx,
int32_t hashtbl_size,
int64_t* __restrict__ hashtbl,
int64_t* __restrict__ cache_freq) {
int32_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
int64_t cidx = __ldg(&colidx[n]);
hashtbl_insert<int64_t, int64_t, true>(
cidx, 1, hashtbl_size, MAX_PROBES, hashtbl, cache_freq);
}
}
void update_cache_state_cuda(Tensor colidx, Tensor hashtbl, Tensor cache_freq) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(colidx.get_device());
int32_t nnz = colidx.numel();
if (nnz == 0) {
return;
}
TORCH_CHECK(hashtbl.numel() > 0);
TORCH_CHECK(hashtbl.numel() == cache_freq.numel());
int32_t threads = (nnz > 256 ? 256 : 32);
int32_t num_blocks = (nnz + threads - 1) / threads;
update_cache_state_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
nnz,
colidx.data_ptr<int64_t>(),
hashtbl.numel(),
hashtbl.data_ptr<int64_t>(),
cache_freq.data_ptr<int64_t>());
}
__global__ void mark_popular_colidx_kernel(
int32_t hashtbl_size,
int32_t cache_size,
int64_t* __restrict__ cache_freq_sorted_hashtbl,
int64_t* __restrict__ hashtbl,
int64_t* __restrict__ cache_freq,
int32_t* __restrict__ cache_state) {
int32_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= hashtbl_size) {
return;
}
if (cache_freq_sorted_hashtbl[n] != -1) {
int32_t hashtbl_idx = hashtbl_find(
cache_freq_sorted_hashtbl[n], hashtbl_size, MAX_PROBES, hashtbl);
if (n < cache_size) {
cache_state[hashtbl_idx] = n;
} else {
hashtbl[hashtbl_idx] = -1;
cache_freq[hashtbl_idx] = 0;
}
} else if (n < cache_size) {
    // Hack: fill unused slots with column index 0 so the batched GEMM in
    // prefetch_cached_weights_cuda can run over a full cache-sized batch instead of
    // hitting the -1 sentinel.
cache_freq_sorted_hashtbl[n] = 0;
}
}
__global__ void copy_output_kernel(
int32_t N,
int32_t D,
int32_t start_idx,
const float* __restrict__ tr_last,
float* __restrict__ output) {
int32_t n = blockIdx.x * blockDim.y + threadIdx.y;
if (n < N) {
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<float> tr(&tr_last[n * D + d * 4]);
tr.store(&output[(start_idx + n) * D + d * 4]);
}
}
}
void prefetch_cached_weights_cuda(
int32_t batch_count,
const std::vector<int>& tt_p_shapes,
const std::vector<int>& tt_q_shapes,
const std::vector<int>& tt_ranks,
const std::vector<Tensor>& tt_cores,
Tensor L,
Tensor cache_freq_sorted_hashtbl,
Tensor cache_weight) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(cache_weight.get_device());
int32_t nnz = cache_weight.size(0);
if (nnz == 0) {
return;
}
int32_t T = tt_p_shapes.size();
int32_t D = cache_weight.size(1);
TORCH_CHECK(batch_count > 0);
TORCH_CHECK(D > 0);
TORCH_CHECK(D % 4 == 0);
TORCH_CHECK(T > 0);
// batch gemm parameters
std::vector<int32_t> m(T - 1);
std::vector<int32_t> n(T - 1);
std::vector<int32_t> k(T - 1);
float alpha = 1.0;
float beta = 0.0;
int32_t m_ = tt_q_shapes[0];
for (int32_t t = 0; t < T - 1; ++t) {
m[t] = m_;
k[t] = tt_ranks[t + 1];
n[t] = tt_q_shapes[t + 1] * tt_ranks[t + 2];
m_ = m_ * tt_q_shapes[t + 1];
}
  // allocate the intermediate buffers
std::vector<Tensor> tr;
int32_t tr_size = tt_q_shapes[0] * tt_ranks[1];
for (int32_t t = 0; t < T - 1; ++t) {
tr_size = tr_size * tt_q_shapes[t + 1] * tt_ranks[t + 2] / tt_ranks[t + 1];
tr.push_back(at::empty(
{batch_count, tr_size}, tt_cores[0].options().dtype(at::kFloat)));
}
auto a_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto b_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
auto c_ptr_tensor = at::empty(
{(T - 1) * batch_count}, tt_cores[0].options().dtype(at::kLong));
float** a_ptr = (float**)a_ptr_tensor.data_ptr<int64_t>();
float** b_ptr = (float**)b_ptr_tensor.data_ptr<int64_t>();
float** c_ptr = (float**)c_ptr_tensor.data_ptr<int64_t>();
Tensor tableidx = zeros_like(cache_freq_sorted_hashtbl);
for (int32_t start_idx = 0; start_idx < nnz; start_idx += batch_count) {
int32_t end_idx =
start_idx + batch_count < nnz ? start_idx + batch_count : nnz;
int32_t N = end_idx - start_idx;
init_batch_gemm_forward_cuda(
T,
N,
L.data_ptr<int64_t>(),
&(cache_freq_sorted_hashtbl.data_ptr<int64_t>()[start_idx]),
&(tableidx.data_ptr<int64_t>()[start_idx]),
tt_cores,
tr,
a_ptr,
b_ptr,
c_ptr);
// batched GEMM
for (int32_t t = 0; t < T - 1; ++t) {
cuda_gemm_batched_fp32_fp32(
CUBLAS_OP_N,
CUBLAS_OP_N,
n[t],
m[t],
k[t],
&alpha,
(void**)&(a_ptr[t * N]),
n[t],
(void**)&(b_ptr[t * N]),
k[t],
&beta,
(void**)&(c_ptr[t * N]),
n[t],
N);
}
int32_t tx = std::min(1024, D / 4);
int32_t ty = 1024 / tx;
dim3 threads(tx, ty);
int32_t num_blocks = (N + ty - 1) / ty;
copy_output_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
D,
start_idx,
tr[T - 2].data_ptr<float>(),
cache_weight.data_ptr<float>());
} // for (int start_idx = 0; start_idx < nnz; start_idx += batch_count)
}
void cache_populate_cuda(
int64_t num_embeddings,
const std::vector<int>& tt_p_shapes,
const std::vector<int>& tt_q_shapes,
const std::vector<int>& tt_ranks,
const std::vector<Tensor>& tt_cores,
Tensor L,
Tensor hashtbl,
Tensor cache_freq,
Tensor cache_state,
Tensor cache_weight) {
TORCH_CHECK(hashtbl.numel() > 0);
TORCH_CHECK(hashtbl.numel() == cache_freq.numel());
TORCH_CHECK(cache_freq.numel() < std::numeric_limits<int32_t>::max());
TORCH_CHECK(hashtbl.numel() >= cache_weight.size(0));
auto cache_freq_sorted_hashtbl = empty_like(hashtbl);
// Sort hash_table by cache_freq
{
auto sorted_cache_freq = empty_like(cache_freq);
size_t temp_storage_bytes = 0;
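    // CUB two-pass pattern: the first call with a null workspace only computes
    // temp_storage_bytes; the second call performs the descending sort of
    // (cache_freq, hashtbl) key-value pairs.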
AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairsDescending(
nullptr,
temp_storage_bytes,
cache_freq.data_ptr<int64_t>(),
sorted_cache_freq.data_ptr<int64_t>(),
hashtbl.data_ptr<int64_t>(),
cache_freq_sorted_hashtbl.data_ptr<int64_t>(),
cache_freq.numel(),
0,
sizeof(int64_t) * 8,
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
hashtbl.options().dtype(kByte));
AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairsDescending(
temp_storage.data_ptr(),
temp_storage_bytes,
cache_freq.data_ptr<int64_t>(),
sorted_cache_freq.data_ptr<int64_t>(),
hashtbl.data_ptr<int64_t>(),
cache_freq_sorted_hashtbl.data_ptr<int64_t>(),
cache_freq.numel(),
0,
sizeof(int64_t) * 8,
at::cuda::getCurrentCUDAStream(),
false));
}
// Mark popular colidx
int32_t hashtbl_size = hashtbl.numel();
int32_t threads = 256;
int32_t num_blocks = (hashtbl_size + threads - 1) / threads;
mark_popular_colidx_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
hashtbl_size,
cache_weight.size(0),
cache_freq_sorted_hashtbl.data_ptr<int64_t>(),
hashtbl.data_ptr<int64_t>(),
cache_freq.data_ptr<int64_t>(),
cache_state.data_ptr<int32_t>());
int32_t batch_count = 200;
prefetch_cached_weights_cuda(
batch_count,
tt_p_shapes,
tt_q_shapes,
tt_ranks,
tt_cores,
L,
cache_freq_sorted_hashtbl,
cache_weight);
}
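// Expand the combined offsets array (laid out table-major: entry t * B + b covers table t,
// batch row b) into per-nonzero rowidx / tableidx arrays.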
__global__ void compute_rowidx_kernel(
int32_t B,
int32_t num_tables,
const int64_t* __restrict__ offsets,
int64_t* __restrict__ rowidx,
int64_t* __restrict__ tableidx) {
int32_t b = blockIdx.x * blockDim.y + threadIdx.y;
if (b < B * num_tables) {
int64_t colidx_start = offsets[b];
int64_t colidx_end = offsets[b + 1];
int32_t L = colidx_end - colidx_start;
for (int32_t l = threadIdx.x; l < L; l += blockDim.x) {
rowidx[l + colidx_start] = b % B;
tableidx[l + colidx_start] = b / B;
}
}
}
__global__ void cache_lookup_kernel(
int32_t N,
const int64_t* __restrict__ colidx,
int32_t hashtbl_size,
const int64_t* __restrict__ hashtbl,
const int32_t* __restrict__ cache_state,
bool* __restrict__ is_tt,
int32_t* __restrict__ cache_location) {
int32_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
int32_t hashtbl_idx =
hashtbl_find(colidx[n], hashtbl_size, MAX_PROBES, hashtbl);
if (hashtbl_idx != -1 && cache_state[hashtbl_idx] != -1) {
is_tt[n] = false;
cache_location[n] = cache_state[hashtbl_idx];
} else {
is_tt[n] = true;
}
}
}
std::tuple<Tensor, Tensor, Tensor, int32_t, c10::optional<Tensor>>
preprocess_indices_sync_cuda(
Tensor colidx,
Tensor offsets,
int32_t num_tables,
bool warmup,
Tensor hashtbl,
Tensor cache_state) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(colidx.get_device());
auto rowidx = empty_like(colidx);
auto tableidx = empty_like(colidx);
if (rowidx.numel() == 0) {
return {colidx, rowidx, tableidx, rowidx.numel(), c10::nullopt};
}
int32_t B = (offsets.numel() - 1) / num_tables;
int32_t N = colidx.numel();
int32_t num_rows = offsets.numel() - 1;
int32_t tx = 8;
int32_t ty = 32;
compute_rowidx_kernel<<<
div_round_up(num_rows, ty),
dim3(tx, ty),
0,
c10::cuda::getCurrentCUDAStream()>>>(
B,
num_tables,
offsets.data_ptr<int64_t>(),
rowidx.data_ptr<int64_t>(),
tableidx.data_ptr<int64_t>());
if (warmup || num_tables != 1) {
    // During warmup, or when num_tables != 1, skip the cache lookup
return {colidx, rowidx, tableidx, rowidx.numel(), c10::nullopt};
} else {
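    // Cache path: cache_lookup_kernel flags each index as TT-computed (cache miss) or
    // cache-served (hit); cub::DevicePartition::Flagged then moves the flagged TT-path
    // entries of rowidx, colidx, and cache_locations to the front of the partitioned
    // arrays and reports their count in num_tt_indices; the cache hits follow them.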
auto partitioned_colidx = empty_like(colidx);
auto partitioned_rowidx = empty_like(rowidx);
auto num_tt_indices = zeros({1}, rowidx.options().dtype(kInt));
auto cache_locations = empty_like(rowidx, rowidx.options().dtype(kInt));
auto partitioned_cache_locations =
empty_like(rowidx, rowidx.options().dtype(kInt));
{
auto is_tt = empty_like(rowidx, rowidx.options().dtype(kBool));
int32_t threads = 256;
int32_t num_blocks = div_round_up(N, threads);
cache_lookup_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
N,
colidx.data_ptr<int64_t>(),
hashtbl.numel(),
hashtbl.data_ptr<int64_t>(),
cache_state.data_ptr<int32_t>(),
is_tt.data_ptr<bool>(),
cache_locations.data_ptr<int32_t>());
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(cub::DevicePartition::Flagged(
nullptr,
temp_storage_bytes,
rowidx.data_ptr<int64_t>(),
is_tt.data_ptr<bool>(),
partitioned_rowidx.data_ptr<int64_t>(),
num_tt_indices.data_ptr<int32_t>(),
rowidx.numel(),
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
hashtbl.options().dtype(kByte));
AT_CUDA_CHECK(cub::DevicePartition::Flagged(
temp_storage.data_ptr(),
temp_storage_bytes,
rowidx.data_ptr<int64_t>(),
is_tt.data_ptr<bool>(),
partitioned_rowidx.data_ptr<int64_t>(),
num_tt_indices.data_ptr<int32_t>(),
rowidx.numel(),
at::cuda::getCurrentCUDAStream(),
false));
AT_CUDA_CHECK(cub::DevicePartition::Flagged(
temp_storage.data_ptr(),
temp_storage_bytes,
colidx.data_ptr<int64_t>(),
is_tt.data_ptr<bool>(),
partitioned_colidx.data_ptr<int64_t>(),
num_tt_indices.data_ptr<int32_t>(),
colidx.numel(),
at::cuda::getCurrentCUDAStream(),
false));
AT_CUDA_CHECK(cub::DevicePartition::Flagged(
temp_storage.data_ptr(),
temp_storage_bytes,
cache_locations.data_ptr<int32_t>(),
is_tt.data_ptr<bool>(),
partitioned_cache_locations.data_ptr<int32_t>(),
num_tt_indices.data_ptr<int32_t>(),
cache_locations.numel(),
at::cuda::getCurrentCUDAStream(),
false));
}
int32_t N_tt_indices;
cudaMemcpyAsync(
&N_tt_indices,
num_tt_indices.data_ptr<int32_t>(),
sizeof(int32_t),
cudaMemcpyDeviceToHost,
at::cuda::getCurrentCUDAStream());
cudaStreamSynchronize(at::cuda::getCurrentCUDAStream());
return {
partitioned_colidx,
partitioned_rowidx,
tableidx,
N_tt_indices,
partitioned_cache_locations};
}
}
__global__ void cache_forward_kernel(
int32_t nnz,
int32_t D,
const int64_t* __restrict__ rowidx,
const int32_t* __restrict__ cache_locations,
const float* __restrict__ cache_weight,
float* __restrict__ output) {
int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y;
if (indice_id >= nnz) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]);
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
int64_t row_index = rowidx[indice_id];
// now, find the end of the segment (and thus the segment length `SL`).
int32_t SL = 1;
while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) {
SL += 1;
}
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<float> sum(&output[row_index * D + d * 4]);
for (int32_t sl = 0; sl < SL; ++sl) {
int32_t idx = __ldg(&cache_locations[indice_id + sl]);
Vec4T<float> weight(&cache_weight[idx * D + d * 4]);
sum.acc.x += weight.acc.x;
sum.acc.y += weight.acc.y;
sum.acc.z += weight.acc.z;
sum.acc.w += weight.acc.w;
}
sum.store(&output[row_index * D + d * 4]);
}
}
void cache_forward_cuda(
int32_t B,
int32_t nnz,
Tensor cache_locations,
Tensor rowidx,
Tensor cache_weight,
Tensor output) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(rowidx.get_device());
TORCH_CHECK(B > 0);
int32_t D = cache_weight.size(1);
TORCH_CHECK(D > 0);
TORCH_CHECK(D % 4 == 0);
if (nnz == 0) {
return;
}
int32_t tx = kWarpSize;
int32_t ty = 1024 / tx;
dim3 threads(tx, ty);
int32_t num_blocks = (nnz + ty - 1) / ty;
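  // Launch geometry (a reading of cache_forward_kernel, not an original comment):
  // blockDim = (kWarpSize, 1024 / kWarpSize), so each warp (indexed by threadIdx.y)
  // handles one candidate segment start in rowidx while its lanes stride over the
  // D dimension four floats at a time.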
cache_forward_kernel<<<
num_blocks,
threads,
0,
c10::cuda::getCurrentCUDAStream()>>>(
nnz,
D,
rowidx.data_ptr<int64_t>(),
cache_locations.data_ptr<int32_t>(),
cache_weight.data_ptr<float>(),
output.data_ptr<float>());
}
__global__ void cache_backward_sgd_kernel(
int32_t nnz,
int32_t D,
const float* __restrict__ grad_output,
const int32_t* __restrict__ cache_locations,
const int64_t* __restrict__ rowidx,
float learning_rate,
float* __restrict__ cache_weight) {
int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y;
if (indice_id >= nnz) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]);
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
int64_t row_index = rowidx[indice_id];
// now, find the end of the segment (and thus the segment length `SL`).
int32_t SL = 1;
while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) {
SL += 1;
}
for (int32_t sl = 0; sl < SL; ++sl) {
int32_t idx = __ldg(&cache_locations[indice_id + sl]);
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<acc_type<float, true>> grad_out_vec(
&grad_output[row_index * D + d * 4]);
gpuAtomicAdd(
&cache_weight[idx * D + d * 4 + 0],
-grad_out_vec.acc.x * learning_rate);
gpuAtomicAdd(
&cache_weight[idx * D + d * 4 + 1],
-grad_out_vec.acc.y * learning_rate);
gpuAtomicAdd(
&cache_weight[idx * D + d * 4 + 2],
-grad_out_vec.acc.z * learning_rate);
gpuAtomicAdd(
&cache_weight[idx * D + d * 4 + 3],
-grad_out_vec.acc.w * learning_rate);
}
}
}
void cache_backward_sgd_cuda(
int32_t nnz,
Tensor grad_output,
Tensor cache_locations,
Tensor rowidx,
float learning_rate,
Tensor cache_weight) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(cache_weight.get_device());
if (nnz == 0) {
return;
}
const auto D = cache_weight.size(1);
TORCH_CHECK(D > 0);
TORCH_CHECK(D % 4 == 0);
int32_t tx = kWarpSize;
int32_t ty = 1024 / tx;
dim3 threads(tx, ty);
int32_t num_blocks = div_round_up(nnz, ty);
cache_backward_sgd_kernel<<<
num_blocks,
threads,
0,
at::cuda::getCurrentCUDAStream()>>>(
nnz,
D,
grad_output.data_ptr<float>(),
cache_locations.data_ptr<int32_t>(),
rowidx.data_ptr<int64_t>(),
learning_rate,
cache_weight.data_ptr<float>());
AT_CUDA_CHECK(cudaGetLastError());
return;
}
__global__ void cache_backward_dense_kernel(
int32_t nnz,
int32_t D,
const float* __restrict__ grad_output,
const int32_t* __restrict__ cache_locations,
const int64_t* __restrict__ rowidx,
float* __restrict__ grad_cache_weight) {
int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y;
if (indice_id >= nnz) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]);
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
int64_t row_index = rowidx[indice_id];
// now, find the end of the segment (and thus the segment length `SL`).
int32_t SL = 1;
while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) {
SL += 1;
}
for (int32_t sl = 0; sl < SL; ++sl) {
int32_t idx = __ldg(&cache_locations[indice_id + sl]);
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<acc_type<float, true>> grad_out_vec(
&grad_output[row_index * D + d * 4]);
gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 0], grad_out_vec.acc.x);
gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 1], grad_out_vec.acc.y);
gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 2], grad_out_vec.acc.z);
gpuAtomicAdd(&grad_cache_weight[idx * D + d * 4 + 3], grad_out_vec.acc.w);
}
}
}
Tensor cache_backward_dense_cuda(
int32_t nnz,
Tensor grad_output,
Tensor cache_locations,
Tensor rowidx,
float learning_rate,
Tensor cache_weight) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(cache_weight.get_device());
auto grad_cache_weight = zeros_like(cache_weight);
if (nnz == 0) {
return grad_cache_weight;
}
const auto D = cache_weight.size(1);
TORCH_CHECK(D > 0);
TORCH_CHECK(D % 4 == 0);
int32_t tx = kWarpSize;
int32_t ty = 1024 / tx;
dim3 threads(tx, ty);
int32_t num_blocks = div_round_up(nnz, ty);
cache_backward_dense_kernel<<<
num_blocks,
threads,
0,
at::cuda::getCurrentCUDAStream()>>>(
nnz,
D,
grad_output.data_ptr<float>(),
cache_locations.data_ptr<int32_t>(),
rowidx.data_ptr<int64_t>(),
grad_cache_weight.data_ptr<float>());
AT_CUDA_CHECK(cudaGetLastError());
return grad_cache_weight;
}
__global__ void cache_backward_rowwise_adagrad_approx_kernel(
int32_t nnz,
int32_t D,
const float* __restrict__ grad_output,
const int32_t* __restrict__ cache_locations,
const int64_t* __restrict__ rowidx,
float learning_rate,
float eps,
float* __restrict__ cache_optimizer_state,
float* __restrict__ cache_weight) {
int32_t indice_id = blockIdx.x * blockDim.y + threadIdx.y;
if (indice_id >= nnz) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
// check if this warp is responsible for this whole segment.
bool segment_start =
(indice_id == 0 || rowidx[indice_id - 1] != rowidx[indice_id]);
if (!segment_start) {
// don't have *warp* divergence since we launch full warps in blockDim.x,
// so we can just exit this warp entirely.
return;
}
int64_t row_index = rowidx[indice_id];
// now, find the end of the segment (and thus the segment length `SL`).
int32_t SL = 1;
while (indice_id + SL < nnz && rowidx[indice_id + SL] == row_index) {
SL += 1;
}
float g_local_sum_square = 0.0;
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<float> grad_out_vec(&grad_output[row_index * D + d * 4]);
g_local_sum_square += grad_out_vec.acc.x * grad_out_vec.acc.x +
grad_out_vec.acc.y * grad_out_vec.acc.y +
grad_out_vec.acc.z * grad_out_vec.acc.z +
grad_out_vec.acc.w * grad_out_vec.acc.w;
}
float g_avg_square = warpReduceAllSum<float>(g_local_sum_square) / D;
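  // In effect this is a row-wise Adagrad step on the cache rows touched by this segment:
  // cache_optimizer_state[idx] += mean(g^2), and the gradient is scaled by
  // learning_rate / (sqrt(updated_state) + eps), as computed below by lane 0.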
for (int32_t sl = 0; sl < SL; ++sl) {
auto idx = __ldg(&cache_locations[indice_id + sl]);
float multiplier;
if (threadIdx.x == 0) {
float old_sum_square_grads =
gpuAtomicAdd(&cache_optimizer_state[idx], g_avg_square);
multiplier = learning_rate *
(1.0 / (sqrt(old_sum_square_grads + g_avg_square) + eps));
}
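    // Broadcast the multiplier computed by lane 0 to every lane in the warp.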
multiplier = __shfl_sync(0xFFFFFFFF, multiplier, 0);
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<float> grad_out_vec(&grad_output[row_index * D + d * 4]);
Vec4T<float> weight_new(&cache_weight[idx * D + d * 4]);
weight_new.acc.x -= grad_out_vec.acc.x * multiplier;
weight_new.acc.y -= grad_out_vec.acc.y * multiplier;
weight_new.acc.z -= grad_out_vec.acc.z * multiplier;
weight_new.acc.w -= grad_out_vec.acc.w * multiplier;
weight_new.store(&cache_weight[idx * D + d * 4]);
}
}
}
void cache_backward_rowwise_adagrad_approx_cuda(
int32_t nnz,
Tensor grad_output,
Tensor cache_locations,
Tensor rowidx,
float learning_rate,
float eps,
Tensor cache_optimizer_state,
Tensor cache_weight) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(cache_weight.get_device());
if (nnz == 0) {
return;
}
const auto D = cache_weight.size(1);
TORCH_CHECK(D > 0);
TORCH_CHECK(D % 4 == 0);
int32_t tx = kWarpSize;
int32_t ty = 1024 / tx;
dim3 threads(tx, ty);
int32_t num_blocks = div_round_up(nnz, ty);
cache_backward_rowwise_adagrad_approx_kernel<<<
num_blocks,
threads,
0,
at::cuda::getCurrentCUDAStream()>>>(
nnz,
D,
grad_output.data_ptr<float>(),
cache_locations.data_ptr<int32_t>(),
rowidx.data_ptr<int64_t>(),
learning_rate,
eps,
cache_optimizer_state.data_ptr<float>(),
cache_weight.data_ptr<float>());
AT_CUDA_CHECK(cudaGetLastError());
}
#include "nnbnorm.hpp"
#include "datacu.hpp"
#include "impl/blashelper.hpp"
#include "impl/sharedmem.cuh"
#include <cassert>
#include <cstdint>
#include <cfloat>
// -------------------------------------------------------------------
// Helpers
// -------------------------------------------------------------------
/*
# Overview
Batch normalization accumulates statistics for each feature channel
by summing across spatial locations and instances. Spatial locations
are contiguous in memory, but there is a gap when moving from an image
to the next.
The GPU runs in parallel blocks of threads (typically of 512 elements).
In an efficient implementation, the threads in a block operate in parallel
on blocks of 512 consecutive elements, performing identical operations. For efficient
memory access, furthermore, the 512-element memory blocks must be aligned.
Thus a thread block of size bs should read consecutive blocks
of memory locations as follows:
0 bs 2bs 3bs
+block+-----+-----+-----+-----+-----+-----+-----+-----+-----+
However, feature planes do not align to memory block boundaries and
there is a gap when the next instance/image is visited.
The data that needs to be summed over looks like this:
0 bs 2bs 3bs
+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+
pstart +psize + 1pstride
*--plane--* -gap- *---------*
We program the thread block to visit all the memory blocks necessary to
cover all the planes. As each block is applied to the data, some
threads may occasionally discard reads that are outside the required
range:
0 bs 2bs 3bs
+xxx--+-----+--xxx+ + +xxx--+-----+--xxx+ + +
pstart +psize + 1pstride
*---------* *---------*
We use a simple algorithm to perform this visit. Let p=0,1,... be the plane
index and b=0,1,... the block index.
1. Begin with p=0.
2. Find the first memory block b that overlaps with the plane p:
b = pstart / bs.
3. Each thread in the thread block reads a certain element in memory block b.
If this element is outside plane p it is ignored, otherwise it is accumulated.
4. If the block ends beyond the last element of the plane, i.e. if
b * bs + bs >= p * pstride + pstart + psize,
then the plane has been fully read: we increment p
and continue from (2). Otherwise we increase b to read the next few
elements of the current plane p.
This algorithm considers all planes in sequence and visits each
one of their elements only once. Note that this works for any combination
of block size, plane offset, plane size, and plane stride. For example,
if planes are smaller than blocks, the same block will simply be
read multiple times, each time discarding different elements.
## Detailed block-plane association
In the scheme above, one thread block is responsible for accumulating
data for a single feature channel, across planes and instances/images.
Hence in this scheme numBlocks = numChannels. In practice, it can
be preferable to increase the number of blocks, particularly when
the batch is large, to allow more work to progress in parallel.
In order to do so, we use numBlocks = M * numChannels, where M >= 1
is a multiplier. Thread block number tb operates on
channel = tb % numChannels
and on images
(tb / numChannels) * M + [0 ... M-1].
In this manner, images are divided into chunks of M, giving
numBlocksPerChannel = ceil(numImages / M) blocks per channel, and
statistics are computed in parallel for each chunk.
# Reduction
Once all thread blocks are complete, partial sums for each feature
channel must be aggregated. This means summing at the level of:
1. Warps (up to 32 threads). A thread warp is a subset of highly coupled threads within
a thread block. Threads are *coalesced* and
run essentially in a single stream of vector instructions on the GPU,
which also means that they stay synchronized implicitly. Threads
in a warp write to the same shared memory area; this is reduced
by performing a hierarchical summation.
2. Blocks (up to 512 threads). Thread blocks are assigned to an SM,
and the SM breaks them down into warps for execution.
Threads in the same block can be synchronized explicitly using __syncthreads().
They all run concurrently in the same SM and write to the same
shared memory area like the warps. Reduction is also hierarchical,
but must use __syncthreads().
3. Chunks: the numBlocksPerChannel partial results for each feature channel
must be aggregated. This is done by storing the partial results in a
vector of numChunk elements in global memory and running a GPU
kernel to collapse it.
## Hierarchical reduction
This is used to accumulate a vector v[0], ..., v[blockSize-1]
stored in the shared memory area of a thread block.
This occurs in stages, each time collapsing elements at a distance
blockSize/2^k. In particular, each thread t in the block does:
t=0,...,blockSize/2^k: v[t] = v[t] + v[t + blockSize/2^k].
Threads outside the active range do nothing. When k=log2(blockSize),
in particular, thread 0 does:
t=0: v[0] = v[0] + v[1]
which is the last summation in the reduction.
Every time the thread block performs a summation, the block must be
synchronized. There are two regimes:
1. When blockSize/2^k <= warpSize, synchronization is implicit as
the active threads t span only a single warp.
2. When blockSize/2^k > warpSize, we must add __syncthreads() after
the summation.
## Choosing the number of blocks
Each channel is processed by one or more blocks.
There are numBlocksPerChannel >= 1 blocks per channel, each working
on a subset of one or more images. There are
numBlocks = numBlocksPerChannel * numChannels
blocks in the grid.
We select numBlocksPerChannel to satisfy the following constraints:
1. There must be at least one block per channel:
numBlocksPerChannel >= 1.
2. There must be at most one block per image:
numBlocksPerChannel <= size.
3. The grid size must be less than 65536 (CUDA limit)
numBlocksPerChannel <= 65536 / numChannels.
Note that constraints (1) and (3) can be satisfied only if
numChannels <= 65536. This is usually not a problem, but may fail
in general.
In general, (1--3) can be satisfied by setting numBlocksPerChannel=1.
However, this is suboptimal if there are too many operations
per block.
We would like to do at most
L = 10e3 * blockSize
operations per block and each block does
(planeArea * size)/numBlocksPerChannel
operations. Thus the target value for numBlocksPerChannel is
numBlocksPerChannel = ceil((planeArea * size) / L).
*/
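// Illustrative sketch (not part of the original source; the helper name is hypothetical):
// one way to pick numBlocksPerChannel under constraints (1)-(3) above and the
// L = 10e3 * blockSize target. The forward/backward drivers below hard-code
// numBlocksPerChannel = 1 and keep an equivalent computation commented out; this
// helper merely restates that reasoning in code form and is not called anywhere.
static inline int pickNumBlocksPerChannel(size_t planeArea, size_t size,
                                          size_t numChannels, size_t blockSize)
{
  size_t L = 10000 * blockSize ;                              // target operations per block
  size_t n = (planeArea * size + L - 1) / L ;                 // ceil(total work / L)
  if (n > size) { n = size ; }                                // (2) at most one block per image
  if (n > 65536 / numChannels) { n = 65536 / numChannels ; }  // (3) grid size limit
  if (n < 1) { n = 1 ; }                                      // (1) at least one block per channel
  return (int)n ;
}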
// MSB_WARP = log2(WARP_SIZE)
#define WARP_SIZE 32
#define MSB_WARP 5
// macro function
#define min(a,b) (a > b ? b : a)
static inline int getBlockSize(int dataSize)
{
int blockSize = VL_CUDA_NUM_THREADS / 2 ;
if (dataSize < blockSize) {
unsigned int numWarps = dataSize / WARP_SIZE ;
if (numWarps < 4) {
blockSize = 2 * WARP_SIZE ;
}
else if (numWarps < 8) {
blockSize = 4 * WARP_SIZE ;
}
else {
blockSize = 8 * WARP_SIZE ;
}
}
return blockSize ;
}
// get the smallest multiple of factor that is >= x
static inline int nextMultipleOf(int x, int factor)
{
return factor * ((x + factor - 1)/factor) ;
}
template<typename T>
__forceinline__ __device__ void blockReduce(volatile T * mdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
// todo: get rid of maxDataSize?
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >=512) { if (tid < 512) { mdata[tid] += mdata[tid + 512]; } __syncthreads(); } // mdata[0:511] = mdata[0:511] + mdata[512:1023]
if (blockSize >= 512 && maxDataSize + WARP_SIZE >=256) { if (tid < 256) { mdata[tid] += mdata[tid + 256]; } __syncthreads(); } // mdata[0:255] = mdata[0:255] + mdata[256:511]
if (blockSize >= 256 && maxDataSize + WARP_SIZE >=128) { if (tid < 128) { mdata[tid] += mdata[tid + 128]; } __syncthreads(); } // mdata[0:127] = mdata[0:127] + mdata[128:255]
if (blockSize >= 128 && maxDataSize + WARP_SIZE >=64 ) { if (tid < 64) { mdata[tid] += mdata[tid + 64]; } __syncthreads(); } // mdata[0:63] = mdata[0:63] + mdata[64:127]
if (tid < 32) {
// now enter warp
if (blockSize >= 64) { mdata[tid] += mdata[tid + 32]; } // mdata[0:31] = mdata[0:31] + mdata[32:63]
if (blockSize >= 32) { mdata[tid] += mdata[tid + 16]; } // mdata[0:15] = mdata[0:15] + mdata[16:31]
    if (blockSize >= 16) { mdata[tid] += mdata[tid + 8]; } // mdata[0:7] = mdata[0:7] + mdata[8:15]
if (blockSize >= 8) { mdata[tid] += mdata[tid + 4]; } // mdata[0:3] = mdata[0:3] + mdata[4:7]
if (blockSize >= 4) { mdata[tid] += mdata[tid + 2]; } // mdata[0:1] = mdata[0:1] + mdata[2:3]
if (blockSize >= 2) { mdata[tid] += mdata[tid + 1]; } // mdata[0] = mdata[0] + mdata[1]
}
}
template<typename T>
__forceinline__ __device__ void blockReduce2(volatile T * mdata,
volatile T * sdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >=512) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; mdata[tid] += mdata[tid + 512]; } __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >=256) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; mdata[tid] += mdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >=128) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; mdata[tid] += mdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >=64) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; mdata[tid] += mdata[tid + 64]; } __syncthreads(); }
if (tid < 32) {
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; mdata[tid] += mdata[tid + 32]; }
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; mdata[tid] += mdata[tid + 16]; }
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; mdata[tid] += mdata[tid + 8]; }
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; mdata[tid] += mdata[tid + 4]; }
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; mdata[tid] += mdata[tid + 2]; }
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; mdata[tid] += mdata[tid + 1]; }
}
}
template<typename T>
__forceinline__ __device__ void blockReduce4(volatile T * sdata,
volatile T * mdata,
volatile T * rdata,
volatile T * tdata,
unsigned int tid,
unsigned int blockSize,
unsigned int maxDataSize)
{
__syncthreads();
if (blockSize >= 1024 && maxDataSize + WARP_SIZE >= 512) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; mdata[tid] += mdata[tid + 512]; rdata[tid] += rdata[tid + 512]; tdata[tid] += tdata[tid + 512];} __syncthreads(); }
if (blockSize >= 512 && maxDataSize + WARP_SIZE >= 256) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; mdata[tid] += mdata[tid + 256]; rdata[tid] += rdata[tid + 256]; tdata[tid] += tdata[tid + 256];} __syncthreads(); }
if (blockSize >= 256 && maxDataSize + WARP_SIZE >= 128) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; mdata[tid] += mdata[tid + 128]; rdata[tid] += rdata[tid + 128]; tdata[tid] += tdata[tid + 128];} __syncthreads(); }
if (blockSize >= 128 && maxDataSize + WARP_SIZE >= 64) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; mdata[tid] += mdata[tid + 64]; rdata[tid] += rdata[tid + 64]; tdata[tid] += tdata[tid + 64];} __syncthreads(); }
if (tid < 32) {
if (blockSize >= 64) { sdata[tid] += sdata[tid + 32]; mdata[tid] += mdata[tid + 32]; rdata[tid] += rdata[tid + 32]; tdata[tid] += tdata[tid + 32];}
if (blockSize >= 32) { sdata[tid] += sdata[tid + 16]; mdata[tid] += mdata[tid + 16]; rdata[tid] += rdata[tid + 16]; tdata[tid] += tdata[tid + 16];}
if (blockSize >= 16) { sdata[tid] += sdata[tid + 8]; mdata[tid] += mdata[tid + 8]; rdata[tid] += rdata[tid + 8]; tdata[tid] += tdata[tid + 8];}
if (blockSize >= 8) { sdata[tid] += sdata[tid + 4]; mdata[tid] += mdata[tid + 4]; rdata[tid] += rdata[tid + 4]; tdata[tid] += tdata[tid + 4];}
if (blockSize >= 4) { sdata[tid] += sdata[tid + 2]; mdata[tid] += mdata[tid + 2]; rdata[tid] += rdata[tid + 2]; tdata[tid] += tdata[tid + 2];}
if (blockSize >= 2) { sdata[tid] += sdata[tid + 1]; mdata[tid] += mdata[tid + 1]; rdata[tid] += rdata[tid + 1]; tdata[tid] += tdata[tid + 1];}
}
}
// Get the largest memory address that is aligned to a warp's worth of T
// and not greater than x.
template<typename T>
__forceinline__ __device__ uintptr_t getBlockBeginning(void const * x)
{
return (uintptr_t)(x) & (~((uintptr_t)(WARP_SIZE*sizeof(T)) - 1)) ;
}
// Use the current block of threads to sum over a given column of a matrix. The selected
// column is given by the thread block index in the block grid.
//
// This function uses an amount of scratch memory equal to blockSize*sizeof(T)
// where blockSize=blockDim.x.
template<typename T>
__forceinline__ __device__ T matrixSumHelper(T const * matrix, int numRows)
{
// One thread block per column to sum
  // Shared memory is per-block; it holds blockSize intermediate results
//extern __shared__ T scratch [] ;
SharedMemory<T> smem ;
T * scratch = smem.getPointer() ;
int tid = threadIdx.x ;
int column = blockIdx.x ;
int blockSize = blockDim.x ;
// Note that scratch is different for different blocks, hence
// matrix columns. Now fill scratch with partial sums, in a sliding-window
// manner.
scratch[tid] = 0 ;
T const * columnBegin = matrix + column * numRows ;
T const * columnEnd = columnBegin + numRows ;
T const * block = (T const*) getBlockBeginning<T>(columnBegin) + tid ;
while (block < columnEnd) {
if (block >= columnBegin) {
scratch[tid] += *block ;
}
block += blockSize ;
}
// Now scratch[] has blockSize partial sums for this column
// Finish by reducing and saving
blockReduce<T>(scratch, tid, blockSize, numRows) ;
return scratch[0] ;
}
// This kernel accumulates means and variances for the data.
// Each block of threads sums over one or more data planes, resulting
// in an array accumulator[] of dimension numBlocksPerChannel x 2*numChannels.
//
// If each thread block scans all the images, then numBlocksPerChannel = 1.
// However, for efficiency different thread blocks do different
// subset of images, resulting in numBlocksPerChannel partial results to be summed
// later by a second kernel.
//
// The first part accumulator[:,0:numChannels-1] stores the data for the mean
// and the second part accumulator[:,numChannels:2*numChannels-1] the data
// for the sigmas.
//
// This function uses the sliding-window summing technique described
// above. It requires
//
// 2*sizeof(T)*blockSize
//
// bytes of shared scratch memory to hold partial sums for
// means and sigmas.
template<typename T>
__global__ void accumulate_moments_partial(T * accumulator,
T const * data,
int planeArea,
int numPlanes,
int numChannels,
int numBlocksPerChannel)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
//extern __shared__ T s [] ;
SharedMemory<T> smem ;
T * s = smem.getPointer() ;
T * mdata = s ;
T * sdata = mdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
while (block < planeEnd) {
if (block >= planeBegin) {
T x = *block ;
mdata[tid] += x ;
sdata[tid] += x * x ;
}
block += blockSize ;
}
plane += planeStride ;
}
blockReduce2<T>(sdata, mdata, tid, blockSize, planeArea) ;
if (tid == 0) {
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numBlocksPerChannel ;
accumulator[i] = mdata[0];
accumulator[i + gridDim.x] = sdata[0];
}
}
// This kernel sums over the accumulator computed by the function
// above to obtain the moments.
//
// This kernel uses matrixSumHelper() defined above. Hence:
//
// 1. The block grid must be set to have a block
// for each column of accumulator[]. There are here 2*numChannels columns.
//
// 2. There can be any (reasonable) blockSize. Blocks will iterate
// over rows as needed to compute the operation.
//
// 3. It must be called with `blockSize*sizeof(T)` shared
// scratch space.
template<typename T>
__global__ void accumulate_moments_finish(T * moments,
T const * accumulator,
int numRows)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
T x = matrixSumHelper(accumulator, numRows) ;
if (tid == 0) {
moments[column] = x ;
}
}
// After accumulation, we need to renormalize the moments.
//
// 1. It should be called with enough threads to cover all
// numChannels in the moments.
//
// 2. The actual number of blocks is determined based on the block
// size to satisfy condition (1).
template<typename T>
__global__ void normalize_moments(T * moments,
unsigned int numChannels,
T mass,
T epsilon)
{
int unsigned i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < numChannels){
// max(0, __) is for numerical issues
T mean = moments[i] / mass ;
T sigma2 = max((T).0, moments[i + numChannels]/mass - mean*mean) ;
moments[i] = mean ;
moments[i + numChannels] = sqrt(sigma2 + epsilon);
}
}
// Same as accumulate_moments above. Call with:
//
// 1. 2*sizeof(T)*blockSize bytes of shared scratch memory, used to hold
//    the partial sums (derMultipliers and derBiases).
//
// Below, either accumulator is not NULL and derMultipliers, derBiases,
// and moments are, or the function is run in a `final' mode,
// with accumulator set to NULL, and the other pointers set to their
// `final' destination.
template<typename T>
__global__ void accumulate_ders_partial
(T * accumulator,
T * derMultipliers,
T * derBiases,
T const * data,
T const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
int numBlocksPerChannel)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
//extern __shared__ T s[] ;
SharedMemory<T> smem ;
T * s = smem.getPointer() ;
T * mdata = s ;
T * sdata = mdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
T const * dblock = derOutput + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
T x = *block ;
T dy = *dblock ;
mdata[tid] += x * dy ;
sdata[tid] += dy ;
}
block += blockSize ;
dblock += blockSize ;
}
plane += planeStride ;
}
blockReduce2<T>(sdata, mdata, tid, blockSize, planeArea);
if (tid == 0) {
if (numChannels == gridDim.x) {
// Final output ready
derMultipliers[blockIdx.x] = mdata[0];
derBiases[blockIdx.x] = sdata[0];
} else {
      // Partially accumulated output
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numBlocksPerChannel ;
accumulator[i] = mdata[0]; // derMultipliers
accumulator[i + gridDim.x] = sdata[0]; // derBiases
}
}
}
template<typename T>
__global__ void accumulate_ders_finish(T * derMultipliers,
T * derBiases,
T const * accumulator,
int numBlocksPerChannel,
int numChannels)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
T x = matrixSumHelper(accumulator, numBlocksPerChannel) ;
if (tid == 0) {
    // Recall that the matrix stores the columns in order [derMultipliers derBiases],
    // i.e. two types of data
int type = column / numChannels ;
int channel = column % numChannels ;
if (type == 0) {
derMultipliers[channel] = x ;
}
else {
derBiases[channel] = x ;
}
}
}
template<typename T>
__global__ void normalize_ders(T * derMultipliers,
T const * derBiases,
T const * moments,
unsigned int numChannels,
T mass,
T epsilon)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < numChannels){
T mean = moments[idx] ;
T sigma = moments[idx + numChannels] ;
derMultipliers[idx] = (derMultipliers[idx] - mean*derBiases[idx]) / sigma ;
}
}
// Same as accumulate_moments above. Call with:
//
// 1. 4*sizeof(T)*blockSize bytes of shared scratch memory, used to hold
//    the partial sums (derMultipliers, derBiases, means, and sigmas).
//
// Below, either accumulator is not NULL and derMultipliers, derBiases,
// and moments are, or the function is run in a `final' mode,
// with accumulator set to NULL, and the other pointers set to their
// `final' destination.
template<typename T>
__global__ void accumulate_ders_and_moments_partial
(T * accumulator,
T * derMultipliers,
T * derBiases,
T * moments,
T const * data,
T const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
int numBlocksPerChannel)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
//extern __shared__ T s[] ;
SharedMemory<T> smem ;
T * s = smem.getPointer() ;
T * mdata = s ;
T * sdata = mdata + blockSize ;
T * rdata = sdata + blockSize ;
T * tdata = rdata + blockSize ;
mdata[tid] = 0 ;
sdata[tid] = 0 ;
rdata[tid] = 0 ;
tdata[tid] = 0 ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
T const * dblock = derOutput + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
T x = *block ;
T dy = *dblock ;
mdata[tid] += x * dy ;
sdata[tid] += dy ;
rdata[tid] += x * x ;
tdata[tid] += x ;
}
block += blockSize ;
dblock += blockSize ;
}
plane += planeStride ;
}
blockReduce4<T>(sdata, mdata, rdata, tdata, tid, blockSize, planeArea);
if (tid == 0) {
if (numChannels == gridDim.x) {
// Final output ready
derMultipliers[blockIdx.x] = mdata[0];
derBiases[blockIdx.x] = sdata[0];
moments[blockIdx.x] = tdata[0];
moments[blockIdx.x+numChannels] = rdata[0];
} else {
      // Partially accumulated output
int chunk = blockIdx.x / numChannels ;
int i = chunk + channel * numBlocksPerChannel ;
accumulator[i] = mdata[0]; // derMultipliers
accumulator[i + gridDim.x] = sdata[0]; // derBiases
accumulator[i + 2*gridDim.x] = tdata[0]; // means
accumulator[i + 3*gridDim.x] = rdata[0]; // sigmas
}
}
}
template<typename T>
__global__ void accumulate_ders_and_moments_finish(T * derMultipliers,
T * derBiases,
T * moments,
T const * accumulator,
int numBlocksPerChannel,
int numChannels)
{
int tid = threadIdx.x ;
int column = blockIdx.x ;
T x = matrixSumHelper(accumulator, numBlocksPerChannel) ;
if (tid == 0) {
// Recall that the matrix stores in order [derMultipliers derBiases means sigmas]
// containing four types of data
int type = column / numChannels ;
int channel = column % numChannels ;
if (type == 0) {
derMultipliers[channel] = x ;
}
else if (type == 1) {
derBiases[channel] = x ;
}
else if (type == 2) {
moments[channel] = x ;
}
else {
moments[channel + numChannels] = x ;
}
}
}
template<typename T>
__global__ void normalize_ders_and_moments(T * derMultipliers,
T * derBiases,
T * moments,
unsigned int numChannels,
T mass,
T epsilon)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < numChannels){
T mean = moments[idx] / mass;
T sigma2 = max((T).0, moments[idx + numChannels]/mass - mean*mean) ;
T sigma = sqrt(sigma2 + epsilon);
moments[idx] = mean ;
moments[idx + numChannels] = sigma ;
derMultipliers[idx] = (derMultipliers[idx]-mean*derBiases[idx]) / sigma ;
}
}
// Call this kernel like compute_moments, but it does not need a scratch space
template<typename T>
__global__ void batch_normalize_forward(T * outputData,
T const * moments,
T const * data,
T const * multipliers,
T const * biases,
int planeArea,
int numPlanes,
int numChannels)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
T mean = moments[channel];
T sigma = moments[channel+numChannels];
T multiplier = multipliers[channel];
T bias = biases[channel];
T coefficient = multiplier / sigma ;
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T>(planeBegin) + tid ;
T * oblock = outputData + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
*oblock = coefficient * (*block - mean) + bias ;
}
block += blockSize ;
oblock += blockSize ;
}
plane += planeStride ;
}
}
template<typename T>
__global__ void batch_normalize_backward(T * derData,
T const * moments,
T const * data,
T const * multipliers,
T const * derMultipliers,
T const * derBiases,
T const * derOutput,
int planeArea,
int numPlanes,
int numChannels,
T mass)
{
int tid = threadIdx.x ;
int plane = blockIdx.x ;
int blockSize = blockDim.x ;
int planeStride = gridDim.x ;
int channel = blockIdx.x % numChannels ;
T mu = moments[channel];
T sigma = moments[channel + numChannels] ;
T multiplier = multipliers[channel] ;
T derMultiplier = derMultipliers[channel] ;
T muz = derBiases[channel] / mass;
T G1 = multiplier / sigma ;
T G2 = G1 * derMultiplier / (mass*sigma);
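  // A reading of the update below (not a comment from the original source): since
  // normalize_ders() already set derMultiplier = sum(dY*(X-mu))/sigma and muz = mean(dY),
  // the loop computes the standard batch-norm input gradient
  //   dX = (multiplier/sigma) * ( dY - mean(dY) - (X-mu)/sigma * derMultiplier/mass )
  //      = G1 * (dY - muz) - G2 * (X - mu).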
while (plane < numPlanes) {
T const * planeBegin = data + plane * planeArea ;
T const * planeEnd = planeBegin + planeArea ;
T const * block = (T const*) getBlockBeginning<T> (planeBegin) + tid ;
T const * dblock = derOutput + (block - data) ;
T * oblock = derData + (block - data) ;
while (block < planeEnd) {
if (block >= planeBegin) {
*oblock = G1 * (*dblock - muz) - G2 * (*block - mu);
}
block += blockSize ;
dblock += blockSize ;
oblock += blockSize ;
}
plane += planeStride ;
}
}
// -------------------------------------------------------------------
// Forward
// -------------------------------------------------------------------
template<DataType dataType>
struct BatchNormForwardWithMoment<VLDT_GPU, dataType>
{
vl::ErrorCode operator()(BatchNorm &op,
Tensor &output,
Tensor const &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias)
{
cudaError_t status ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto numChannels = input.getDepth() ;
auto size = input.getSize() ;
auto outputData = (type*)output.getMemory() ;
auto momentData = (type const*)moment.getMemory() ;
auto inputData = (type const*)input.getMemory() ;
auto multiplierData = (type const*)multiplier.getMemory() ;
auto biasData = (type const*)bias.getMemory() ;
size_t planeArea = height * width ;
size_t numPlanes = numChannels * size ;
    // Compute the number of compute chunks.
size_t blockSize = getBlockSize(planeArea) ;
//size_t L = 10000 * blockSize ;
//size_t numBlocksPerChannel = (planeArea * size + L - 1) / L ;
//numBlocksPerChannel = std::min(numBlocksPerChannel, size) ;
//numBlocksPerChannel = std::min(numBlocksPerChannel, 65536 / numChannels) ;
//numBlocksPerChannel = std::max(numBlocksPerChannel, 1) ;
size_t numBlocksPerChannel = 1 ;
size_t numBlocks = numChannels * numBlocksPerChannel ;
assert(numBlocksPerChannel >= 1) ;
assert(numBlocksPerChannel <= size) ;
assert(numBlocks <= 65536) ;
batch_normalize_forward <<<numBlocks, blockSize>>>
(outputData, momentData, inputData, multiplierData, biasData,
planeArea, numPlanes, numChannels) ;
status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ;
template<DataType dataType>
struct BatchNormForward<VLDT_GPU, dataType>
{
vl::ErrorCode operator()(BatchNorm &op,
Tensor &output,
Tensor &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias)
{
cudaError_t status ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto numChannels = input.getDepth() ;
auto size = input.getSize() ;
auto outputData = (type*)output.getMemory() ;
auto inputData = (type const*)input.getMemory() ;
auto multiplierData = (type const*)multiplier.getMemory() ;
auto biasData = (type const*)bias.getMemory() ;
size_t planeArea = height * width ;
size_t numPlanes = numChannels * size ;
    // Compute the number of compute chunks.
size_t blockSize = getBlockSize(planeArea) ;
//size_t L = 10000 * blockSize ;
//size_t numBlocksPerChannel = (planeArea * size + L - 1) / L ;
//numBlocksPerChannel = min(numBlocksPerChannel, size) ;
//numBlocksPerChannel = min(numBlocksPerChannel, 65536 / numChannels) ;
//numBlocksPerChannel = max(numBlocksPerChannel, 1) ;
size_t numBlocksPerChannel = 1 ;
size_t numBlocks = numChannels * numBlocksPerChannel ;
// Get scratch space.
size_t accumulatorSize = (numBlocksPerChannel == 1) ? 0 : 2 * nextMultipleOf(numBlocks, WARP_SIZE) ;
size_t workspaceSize = accumulatorSize + (moment.getMemory() ? 0 : 2 * numChannels) ;
type * workspace = (type*)op.context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(type)) ;
if (workspace == NULL && workspaceSize > 0) {
return VLE_OutOfMemory ;
}
type * accumulatorData = workspace ;
Tensor ownMoment(moment) ;
if (ownMoment.getMemory() == NULL) {
ownMoment.setMemory(workspace + accumulatorSize) ;
}
auto momentData = (type*)ownMoment.getMemory() ;
// Accumulate moments.
if (numBlocksPerChannel > 1) {
// Partial.
accumulate_moments_partial <<<numBlocks, blockSize, 2*blockSize*sizeof(type)>>>
(accumulatorData,
inputData,
planeArea,
numPlanes,
numChannels,
numBlocksPerChannel) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
// Total.
int blockSizeForSum = getBlockSize(numBlocksPerChannel) ;
accumulate_moments_finish <<<2*numChannels, blockSizeForSum, blockSizeForSum*sizeof(type)>>>
(momentData, accumulatorData, numBlocksPerChannel) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
} else {
// Total directly.
accumulate_moments_partial <<<numBlocks, blockSize, 2*blockSize*sizeof(type)>>>
(momentData,
inputData,
planeArea,
numPlanes,
numChannels,
1) ;
}
// Normalize moments.
type mass = planeArea*size;
normalize_moments <<<divideAndRoundUp(numChannels,blockSize),blockSize>>>
(momentData, numChannels, mass, (type)op.epsilon) ;
// Normalize the data and apply multipliers and bias.
batch_normalize_forward <<<numBlocks, blockSize>>>
(outputData,
momentData, inputData, multiplierData, biasData,
planeArea,
numPlanes,
numChannels) ;
status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ;
// -------------------------------------------------------------------
// Backward
// -------------------------------------------------------------------
template<DataType dataType>
struct BatchNormBackwardWithMoment<VLDT_GPU, dataType>
{
vl::ErrorCode operator()(BatchNorm &op,
Tensor &derInput,
Tensor &derMultiplier,
Tensor &derBias,
Tensor const &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias,
Tensor const &derOutput)
{
cudaError_t status ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto numChannels = input.getDepth() ;
auto size = input.getSize() ;
auto derInputData = (type*)derInput.getMemory() ;
auto derBiasData = (type*)derBias.getMemory() ;
auto derMultiplierData = (type*)derMultiplier.getMemory() ;
auto inputData = (type const*)input.getMemory() ;
auto momentData = (type const*)moment.getMemory() ;
auto multiplierData = (type const*)multiplier.getMemory() ;
auto biasData = (type const*)bias.getMemory() ;
auto derOutputData = (type const*)derOutput.getMemory() ;
size_t planeArea = height * width ;
size_t numPlanes = numChannels * size ;
    // Compute the number of compute chunks.
size_t blockSize = getBlockSize(planeArea) ;
//size_t L = 10000 * blockSize ;
//size_t numBlocksPerChannel = (planeArea * size + L - 1) / L ;
//numBlocksPerChannel = std::min(numBlocksPerChannel, size) ;
//numBlocksPerChannel = std::min(numBlocksPerChannel, 65536 / numChannels) ;
//numBlocksPerChannel = std::max(numBlocksPerChannel, 1) ;
size_t numBlocksPerChannel = 1 ;
size_t numBlocks = numChannels * numBlocksPerChannel ;
// Mean, variance, derMultiplier and derBias computation.
if (numBlocksPerChannel > 1) {
// Get scratch space.
size_t workspaceSize = 2 * nextMultipleOf(numBlocks, WARP_SIZE) ;
type * accumulatorData = (type*)op.context.getWorkspace
(vl::VLDT_GPU, workspaceSize * sizeof(type)) ;
if (accumulatorData == 0) {
return VLE_OutOfMemory ;
}
// Partial.
accumulate_ders_partial<type> <<<numBlocks, blockSize, 2*blockSize*sizeof(type)>>>
(accumulatorData,
NULL, NULL,
inputData,
derOutputData,
planeArea,
numPlanes,
numChannels,
numBlocksPerChannel) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
// Total.
int blockSizeSum = getBlockSize(numBlocksPerChannel) ;
accumulate_ders_finish<type> <<<2*numChannels, blockSizeSum, blockSizeSum*sizeof(type)>>>
(derMultiplierData, derBiasData, accumulatorData, numBlocksPerChannel, numChannels) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
}
else {
// Total.
accumulate_ders_partial<type> <<<numBlocks, blockSize, 2*blockSize*sizeof(type)>>>
(NULL,
derMultiplierData, derBiasData, inputData, derOutputData,
planeArea,
numPlanes,
numChannels,
1) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
}
// Normalize derMultiplier and derBias.
type mass = planeArea*size;
normalize_ders<type> <<<divideAndRoundUp(numChannels,blockSize),blockSize>>>
(derMultiplierData, derBiasData, momentData, numChannels, mass, op.epsilon) ;
// Compute input derivative.
batch_normalize_backward<type> <<<numBlocks, blockSize>>>
(derInputData, momentData, inputData, multiplierData,
derMultiplierData, derBiasData, derOutputData,
planeArea, numPlanes, numChannels,
mass) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
return VLE_Success ;
}
} ;
template<DataType dataType>
struct BatchNormBackward<VLDT_GPU, dataType>
{
vl::ErrorCode operator()(BatchNorm &op,
Tensor &derInput,
Tensor &derMultiplier,
Tensor &derBias,
Tensor &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias,
Tensor const &derOutput)
{
cudaError_t status ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto numChannels = input.getDepth() ;
auto size = input.getSize() ;
auto derInputData = (type*)derInput.getMemory() ;
auto derBiasData = (type*)derBias.getMemory() ;
auto derMultiplierData = (type*)derMultiplier.getMemory() ;
auto inputData = (type const*)input.getMemory() ;
auto multiplierData = (type const*)multiplier.getMemory() ;
auto biasData = (type const*)bias.getMemory() ;
auto derOutputData = (type const*)derOutput.getMemory() ;
size_t planeArea = height * width ;
size_t numPlanes = numChannels * size ;
    // Compute the number of compute chunks.
size_t blockSize = getBlockSize(planeArea) ;
//size_t L = 10000 * blockSize ;
//size_t numBlocksPerChannel = (planeArea * size + L - 1) / L ;
//numBlocksPerChannel = min(numBlocksPerChannel, size) ;
//numBlocksPerChannel = min(numBlocksPerChannel, 65536 / numChannels) ;
//numBlocksPerChannel = max(numBlocksPerChannel, 1) ;
size_t numBlocksPerChannel = 1 ;
size_t numBlocks = numChannels * numBlocksPerChannel ;
// Get scratch space.
size_t accumulatorSize = (numBlocksPerChannel == 1) ? 0 : 4 * nextMultipleOf(numBlocks, WARP_SIZE) ;
size_t workspaceSize = accumulatorSize + (moment.getMemory() ? 0 : 2 * numChannels) ;
type * workspace = (type*)op.context.getWorkspace(vl::VLDT_GPU, workspaceSize * sizeof(type)) ;
type * accumulatorData = workspace ;
if (workspace == NULL) {
return VLE_OutOfMemory ;
}
Tensor ownMoment(moment) ;
if (ownMoment.getMemory() == NULL) {
ownMoment.setMemory(workspace + accumulatorSize) ;
}
auto momentData = (type*)ownMoment.getMemory() ;
// Mean, variance, derMultiplier and derBias computation.
if (numBlocksPerChannel > 1) {
// Partial.
accumulate_ders_and_moments_partial<type> <<<numBlocks, blockSize, 4*blockSize*sizeof(type)>>>
(accumulatorData,
NULL, NULL, NULL,
inputData,
derOutputData,
planeArea,
numPlanes,
numChannels,
numBlocksPerChannel) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
// Total.
int blockSizeSum = getBlockSize(numBlocksPerChannel) ;
accumulate_ders_and_moments_finish<type> <<<4*numChannels, blockSizeSum, blockSizeSum*sizeof(type)>>>
(derMultiplierData, derBiasData, momentData, accumulatorData, numBlocksPerChannel, numChannels) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
}
else {
// Total.
accumulate_ders_and_moments_partial<type> <<<numBlocks, blockSize, 4*blockSize*sizeof(type)>>>
(NULL,
derMultiplierData, derBiasData, momentData,
inputData, derOutputData,
planeArea,
numPlanes,
numChannels,
1) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
}
// Normalize derMultiplier and derBias.
type mass = planeArea*size;
normalize_ders_and_moments<type> <<<divideAndRoundUp(numChannels,blockSize),blockSize>>>
(derMultiplierData, derBiasData, momentData, numChannels, mass, op.epsilon) ;
// Compute derInput.
batch_normalize_backward<type> <<<numBlocks, blockSize>>>
(derInputData,
momentData, inputData,
multiplierData, derMultiplierData, derBiasData, derOutputData,
planeArea, numPlanes, numChannels,
mass) ;
status = cudaPeekAtLastError() ;
if (status != cudaSuccess) return vl::VLE_Cuda ;
return VLE_Success ;
}
} ;
#include "flowfilter/gpu/image.h"
#include "flowfilter/gpu/gpu_deleter.h"
#include "flowfilter/gpu/error.h"
namespace flowfilter {
namespace gpu {
//#################################################
// GPUImage
//#################################################
GPUImage::GPUImage() {
__width = 0;
__height = 0;
__depth = 0;
__pitch = 0;
__itemSize = 0;
}
GPUImage::GPUImage(const int height, const int width,
const int depth, const int itemSize) {
__height = height;
__width = width;
__depth = depth;
__itemSize = itemSize;
// allocate memory in GPU space
allocate();
}
GPUImage::~GPUImage() {
// nothing to do
// device buffer is released by gpu_deleter
// std::cout << "GPUImage::~GPUImage(): [" <<
// __height << ", " << __width << ", " << __depth << "] : " << __ptr_dev.use_count() << std::endl;
}
int GPUImage::height() const {
return __height;
}
int GPUImage::width() const {
return __width;
}
int GPUImage::depth() const {
return __depth;
}
int GPUImage::pitch() const {
return __pitch;
}
int GPUImage::itemSize() const {
return __itemSize;
}
void* GPUImage::data() {
return __ptr_dev.get();
}
std::shared_ptr<void> GPUImage::getDeviceBuffer() {
return __ptr_dev;
}
void GPUImage::upload(flowfilter::image_t& img) {
// check if device memory is allocated
if(!__ptr_dev) {
// set resolution to input image
__width = img.width;
__height = img.height;
__depth = img.depth;
__itemSize = img.itemSize;
// allocate memory
allocate();
}
// compare shapes
if(compareShape(img)) {
// print first 5 elements of img
// for(int i = 0; i < 5; i ++) {
// std::cout << i << ": " << (int)static_cast<unsigned char*>(img.data)[i] << std::endl;
// }
// issue synchronous memory copy
checkError(cudaMemcpy2D(__ptr_dev.get(), __pitch, img.data, img.pitch,
__width*__depth*__itemSize, __height,
cudaMemcpyHostToDevice));
// TODO: add support for asynchronous copy
} else {
std::cerr << "ERROR: GPUImage::upload(): shapes do not match."
<< "required: [" << __height << ", " << __width << ", " << __depth << "][" << __itemSize << "], passed: "
<< "[" << img.height << ", " << img.width << ", " << img.depth << "][" << img.itemSize << "]" << std::endl;
throw std::invalid_argument("GPUImage::upload(): shapes do not match. Required: [" +
std::to_string(__height) + ", " + std::to_string(__width) + ", " + std::to_string(__depth) + "][" + std::to_string(__itemSize) + "], passed: [" +
std::to_string(img.height) + ", " + std::to_string(img.width) + ", " + std::to_string(img.depth) + "][" + std::to_string(img.itemSize) + "]");
}
}
void GPUImage::download(flowfilter::image_t& img) const {
if(!__ptr_dev) {
std::cerr << "ERROR: GPUImage::download(): unallocated image" << std::endl;
return; // TODO: throw exception
}
if(compareShape(img)) {
// issue synchronous memory copy
checkError(cudaMemcpy2D(img.data, img.pitch, __ptr_dev.get(), __pitch,
__width*__depth*__itemSize, __height, cudaMemcpyDeviceToHost));
// print first 5 elements of img
// for(int i = 0; i < 5; i ++) {
// std::cout << i << ": " << static_cast<float*>(img.data)[i] << std::endl;
// }
} else {
std::cerr << "ERROR: GPUImage::download(): shapes do not match."
<< "required: [" << __height << ", " << __width << ", " << __depth << "][" << __itemSize << "], passed: "
<< "[" << img.height << ", " << img.width << ", " << img.depth << "][" << img.itemSize << "]" << std::endl;
throw std::invalid_argument("GPUImage::download(): shapes do not match. Required: [" +
std::to_string(__height) + ", " + std::to_string(__width) + ", " + std::to_string(__depth) + "][" + std::to_string(__itemSize) + "], passed: [" +
std::to_string(img.height) + ", " + std::to_string(img.width) + ", " + std::to_string(img.depth) + "][" + std::to_string(img.itemSize) + "]");
}
}
void GPUImage::copyFrom(GPUImage& img) {
if(compareShapeGPU(img)) {
// issue synchronous memory copy
checkError(cudaMemcpy2D(__ptr_dev.get(), __pitch,
img.__ptr_dev.get(), img.__pitch,
__width*__depth*__itemSize, __height, cudaMemcpyDeviceToDevice));
} else {
std::cerr << "ERROR: GPUImage::copyFrom(): shapes do not match."
<< "required: [" << __height << ", " << __width << ", " << __depth << "][" << __itemSize << "], passed: "
<< "[" << img.__height << ", " << img.__width << ", " << img.__depth << "][" << img.__itemSize << "]" << std::endl;
throw std::invalid_argument("GPUImage::copyFrom(): shapes do not match. Required: [" +
std::to_string(__height) + ", " + std::to_string(__width) + ", " + std::to_string(__depth) + "][" + std::to_string(__itemSize) + "], passed: [" +
std::to_string(img.__height) + ", " + std::to_string(img.__width) + ", " + std::to_string(img.__depth) + "][" + std::to_string(img.__itemSize) + "]");
}
}
void GPUImage::clear() {
checkError(cudaMemset2D(__ptr_dev.get(), __pitch,
0, __width*__depth*__itemSize, __height));
}
void GPUImage::allocate() {
// std::cout << "GPUImage::allocate()" << std::endl;
void* buffer_dev = nullptr;
checkError(cudaMallocPitch(&buffer_dev, &__pitch,
__width*__depth*__itemSize, __height));
// create a new shared pointer
__ptr_dev = std::shared_ptr<void> (buffer_dev, gpu_deleter<void>());
// std::cout << "\tpitch: " << __pitch << std::endl;
// if(err != cudaSuccess) {
// std::cerr << "ERROR: GPUImage device memory allocation: " << cudaGetErrorString(err) << std::endl;
// // TODO: throw exception?
// throw std::bad_alloc("GPUImage::allocate(): device memory allocation error: " + cudaGetErrorString(err));
// }
}
bool GPUImage::compareShape(const flowfilter::image_t& img) const {
return __height == img.height &&
__width == img.width &&
__depth == img.depth &&
__itemSize == img.itemSize;
}
bool GPUImage::compareShapeGPU(const flowfilter::gpu::GPUImage& img) const {
return __height == img.height() &&
__width == img.width() &&
__depth == img.depth() &&
__pitch == img.pitch() &&
__itemSize == img.itemSize();
}
//#################################################
// GPUTexture
//#################################################
GPUTexture::GPUTexture() {
// texture object is not valid
__validTexture = false;
__refCounter = std::make_shared<int>(0);
}
GPUTexture::GPUTexture( GPUImage& img, cudaChannelFormatKind format) :
GPUTexture(img, format, cudaAddressModeClamp,
cudaFilterModePoint, cudaReadModeElementType, false) {
}
GPUTexture::GPUTexture( GPUImage& img,
cudaChannelFormatKind format,
cudaTextureReadMode readMode) :
GPUTexture(img, format, cudaAddressModeClamp,
cudaFilterModePoint, readMode, false) {
}
GPUTexture::GPUTexture( flowfilter::gpu::GPUImage& img,
cudaChannelFormatKind format,
cudaTextureReadMode readMode,
const bool normalizedCoords) :
GPUTexture(img, format, cudaAddressModeClamp, cudaFilterModePoint,
readMode, normalizedCoords) {
}
GPUTexture::GPUTexture( GPUImage& img,
cudaChannelFormatKind format,
cudaTextureAddressMode addressMode,
cudaTextureFilterMode filterMode,
cudaTextureReadMode readMode,
const bool normalizedCoords) {
__refCounter = std::make_shared<int>(0);
// hold input image
__image = img;
// configure CUDA texture
configure(format, addressMode, filterMode, readMode, normalizedCoords);
}
GPUTexture::~GPUTexture() {
// std::cout << "GPUTexture::~GPUTexture(): " << __refCounter.use_count() << " : " << __texture << std::endl;
// only attempts to destroy the texture if the creation
// was successful
if(__refCounter.use_count() == 1) {
if(__validTexture) {
checkError(cudaDestroyTextureObject(__texture));
}
}
// __image destructor is called automatically and
    // device buffer is released only if it's not being
// shared in any other part of the program.
}
cudaTextureObject_t GPUTexture::getTextureObject() {
return __texture;
}
GPUImage GPUTexture::getImage() {
return __image;
}
void GPUTexture::configure( cudaChannelFormatKind format,
cudaTextureAddressMode addressMode,
cudaTextureFilterMode filterMode,
cudaTextureReadMode readMode,
const bool normalizedCoords) {
__validTexture = false;
int channels = __image.depth();
if(channels > 4) {
std::cerr << "ERROR: GPUTexture::configure(): image channels greater than 4: " << channels << std::endl;
throw std::invalid_argument("GPUTexture::configure(): image channels greater than 4, got: " + std::to_string(channels));
}
// bit width of element
int bitWidth = 8 * __image.itemSize();
// bit width of each channel
int w1 = bitWidth; // there is at least one channel
int w2 = channels >= 2? bitWidth : 0;
int w3 = channels >= 3? bitWidth : 0;
int w4 = channels == 4? bitWidth : 0;
// std::cout << "GPUTexture::configure(): channel width: [" << w1 << ", " << w2 << ", " << w3 << ", " << w4 << "]" << std::endl;
// std::cout << "GPUTexture::configure(): [" << __image.height() << ", " << __image.width() << ", " << __image.depth() << "] size: " << __image.itemSize() << " pitch: " << __image.pitch() << std::endl;
// channel descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(w1, w2, w3, w4, format);
// texture descriptor
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = addressMode;
texDesc.addressMode[1] = addressMode;
texDesc.filterMode = filterMode;
texDesc.readMode = readMode;
texDesc.normalizedCoords = normalizedCoords;
// texture buffer descriptor
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.desc = channelDesc;
resDesc.res.pitch2D.devPtr = __image.data();
resDesc.res.pitch2D.pitchInBytes = __image.pitch();
resDesc.res.pitch2D.width = __image.width();
resDesc.res.pitch2D.height = __image.height();
// creates texture
checkError(cudaCreateTextureObject(&__texture, &resDesc, &texDesc, NULL));
__validTexture = true;
// cudaError_t err = cudaCreateTextureObject(&__texture, &resDesc, &texDesc, NULL);
// std::cout << "GPUTexture::configure(): texture ID: " << __texture << std::endl;
// if(err == cudaSuccess) {
// __validTexture = true;
// } else {
// std::cerr << "ERROR: GPUTexture::configure(): texture creation: "
// << cudaGetErrorString(err) << std::endl;
// __validTexture = false;
// }
}
}; // namespace gpu
}; // namespace flowfilter
#include "core1/panoRemapper.hpp"
#include "core/rect.hpp"
#include "core/transformGeoParams.hpp"
#include "backend/common/imageOps.hpp"
#include "backend/cuda/deviceBuffer.hpp"
#include "backend/cuda/deviceStream.hpp"
#include "backend/cuda/surface.hpp"
#include "cuda/error.hpp"
#include "cuda/util.hpp"
#include "gpu/core1/transform.hpp"
#include "gpu/buffer.hpp"
#include "gpu/memcpy.hpp"
#include "gpu/allocator.hpp"
#include "libvideostitch/panoDef.hpp"
#include "backend/cuda/core/transformStack.cu"
#include "core/kernels/withinStack.cu"
namespace VideoStitch {
namespace Core {
template <Convert2D3DFnT toSphere, class OutputCropper>
__global__ void remapKernel(uint32_t* g_odata, cudaTextureObject_t remapTex, int panoWidth, int panoHeight,
const float2 inPanoScale, const float2 outPanoScale, const vsfloat3x3 R) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < panoWidth && y < panoHeight) {
if (OutputCropper::isPanoPointVisible(x, y, panoWidth, panoHeight)) {
float2 uv = make_float2((float)x, (float)y);
/**
* The transformations are applied relative to the center of the panorama image
*/
uv.x -= (panoWidth - 1) / 2.0f;
uv.y -= (panoHeight - 1) / 2.0f;
/**
* Apply transform stack
*/
uv.x /= outPanoScale.x;
uv.y /= outPanoScale.y;
float3 pt = toSphere(uv);
pt = rotateSphere(pt, R);
uv = SphereToErect(pt);
uv.x *= inPanoScale.x;
uv.y *= inPanoScale.y;
/**
* See notes in warp kernel
* compensate fetching offset with cudaFilterModeLinear by adding 0.5f
* https://stackoverflow.com/questions/10643790/texture-memory-tex2d-basics
*/
uv.x += panoWidth / 2.0f;
uv.y += panoHeight / 2.0f;
float4 px = tex2D<float4>(remapTex, uv.x, uv.y);
g_odata[y * panoWidth + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else {
g_odata[y * panoWidth + x] = 0;
}
}
}
__global__ void remapCubemapKernel(uint32_t* __restrict__ xPositive, uint32_t* __restrict__ xNegative,
uint32_t* __restrict__ yPositive, uint32_t* __restrict__ yNegative,
uint32_t* __restrict__ zPositive, uint32_t* __restrict__ zNegative, int panoWidth,
int panoHeight, cudaTextureObject_t remapTex, int faceDim, const float2 panoScale,
const vsfloat3x3 R) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < faceDim && y < faceDim) {
/* compensate fetching offset with cudaFilterModeLinear by adding 0.5f */
float2 uv = make_float2(x + 0.5f, y + 0.5f);
uv = (uv / faceDim) * 2.f - make_float2(1.f, 1.f);
float3 pt;
for (unsigned int face = 0; face < 6; face++) {
// Layer 0 is positive X face
if (face == 0) {
pt.x = 1;
pt.y = -uv.y;
pt.z = -uv.x;
}
// Layer 1 is negative X face
else if (face == 1) {
pt.x = -1;
pt.y = -uv.y;
pt.z = uv.x;
}
// Layer 2 is positive Y face
else if (face == 2) {
pt.x = uv.x;
pt.y = 1;
pt.z = uv.y;
}
// Layer 3 is negative Y face
else if (face == 3) {
pt.x = uv.x;
pt.y = -1;
pt.z = -uv.y;
}
// Layer 4 is positive Z face
else if (face == 4) {
pt.x = uv.x;
pt.y = -uv.y;
pt.z = 1;
}
// Layer 5 is negative Z face
else if (face == 5) {
pt.x = -uv.x;
pt.y = -uv.y;
pt.z = -1;
}
pt = rotateSphere(pt, R);
float2 xy = SphereToErect(pt);
xy *= panoScale;
/**
* See notes in warp kernel
*/
xy.x += panoWidth / 2.0f;
xy.y += panoHeight / 2.0f;
float4 px = tex2D<float4>(remapTex, xy.x, xy.y);
if (face == 0) {
xPositive[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 1) {
xNegative[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 2) {
yPositive[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 3) {
yNegative[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 4) {
zPositive[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 5) {
zNegative[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
}
}
}
}
template <bool equiangular>
__global__ void rotateCubemapKernel(uint32_t* __restrict__ xPositive, uint32_t* __restrict__ xNegative,
uint32_t* __restrict__ yPositive, uint32_t* __restrict__ yNegative,
uint32_t* __restrict__ zPositive, uint32_t* __restrict__ zNegative, int faceDim,
cudaTextureObject_t remapTex, const vsfloat3x3 R) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < faceDim && y < faceDim) {
float2 uv = make_float2((float)x, (float)y);
uv = (uv / faceDim) * 2.f - make_float2(1.f, 1.f);
if (equiangular) {
uv.x = tanf_vs(uv.x * PI_F_VS / 4.);
uv.y = tanf_vs(uv.y * PI_F_VS / 4.);
}
float3 pt;
for (unsigned int face = 0; face < 6; face++) {
// Layer 0 is positive X face
if (face == 0) {
pt.x = 1;
pt.y = -uv.y;
pt.z = -uv.x;
}
// Layer 1 is negative X face
else if (face == 1) {
pt.x = -1;
pt.y = -uv.y;
pt.z = uv.x;
}
// Layer 2 is positive Y face
else if (face == 2) {
pt.x = uv.x;
pt.y = 1;
pt.z = uv.y;
}
// Layer 3 is negative Y face
else if (face == 3) {
pt.x = uv.x;
pt.y = -1;
pt.z = -uv.y;
}
// Layer 4 is positive Z face
else if (face == 4) {
pt.x = uv.x;
pt.y = -uv.y;
pt.z = 1;
}
// Layer 5 is negative Z face
else if (face == 5) {
pt.x = -uv.x;
pt.y = -uv.y;
pt.z = -1;
}
pt = rotateSphere(pt, R);
if (equiangular) {
// first normalize with Chebyshev distance to project back on the cube
float cheb = fmaxf(abs(pt.x), abs(pt.y));
cheb = fmaxf(cheb, abs(pt.z));
pt /= cheb;
// then reinflate the cube
pt.x = 4. / PI_F_VS * atanf_vs(pt.x);
pt.y = 4. / PI_F_VS * atanf_vs(pt.y);
pt.z = 4. / PI_F_VS * atanf_vs(pt.z);
}
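// This appears to be the equiangular cubemap (EAC) warp: face coordinates are
// pre-warped with u' = tan(u * pi/4) before rotation, and after re-projecting
// onto the cube (the Chebyshev / L-infinity normalization above) the inverse
// u = (4/pi) * atan(u') maps back to equiangular [-1, 1], giving roughly
// uniform texel density in angle across each face.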
float4 px = texCubemap<float4>(remapTex, pt.x, pt.y, pt.z);
if (face == 0) {
xPositive[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 1) {
xNegative[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 2) {
yPositive[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 3) {
yNegative[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 4) {
zPositive[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
} else if (face == 5) {
zNegative[y * faceDim + x] = Image::RGBA::pack(__float2uint_rn(px.x * 255.), __float2uint_rn(px.y * 255.),
__float2uint_rn(px.z * 255.), __float2uint_rn(px.w * 255.));
}
}
}
}
Status rotateCubemap(const PanoDefinition& pano, GPU::CubemapSurface& cubemapSurface, GPU::Buffer<uint32_t> xPosPbo,
GPU::Buffer<uint32_t> xNegPbo, GPU::Buffer<uint32_t> yPosPbo, GPU::Buffer<uint32_t> yNegPbo,
GPU::Buffer<uint32_t> zPosPbo, GPU::Buffer<uint32_t> zNegPbo, const Matrix33<double>& perspective,
bool equiangular, GPU::Stream stream) {
vsfloat3x3 rotation;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
rotation.values[i][j] = (float)perspective(i, j);
}
}
dim3 block(16, 16, 1);
dim3 grid((unsigned)Cuda::ceilDiv(pano.getLength(), block.x), (unsigned)Cuda::ceilDiv(pano.getLength(), block.y), 1);
if (equiangular) {
rotateCubemapKernel<true><<<grid, block, 0, stream.get()>>>(
xPosPbo.get(), xNegPbo.get(), yPosPbo.get(), yNegPbo.get(), zPosPbo.get(), zNegPbo.get(), (int)pano.getLength(),
cubemapSurface.get().texture(), rotation);
} else {
rotateCubemapKernel<false><<<grid, block, 0, stream.get()>>>(
xPosPbo.get(), xNegPbo.get(), yPosPbo.get(), yNegPbo.get(), zPosPbo.get(), zNegPbo.get(), (int)pano.getLength(),
cubemapSurface.get().texture(), rotation);
}
return CUDA_STATUS;
}
__device__ float3 positiveX(float2& uv) {
float3 pt;
pt.x = 1;
pt.y = -uv.y;
pt.z = uv.x;
return pt;
}
__device__ float3 negativeX(float2& uv) {
float3 pt;
pt.x = -1;
pt.y = -uv.y;
pt.z = -uv.x;
return pt;
}
__device__ float3 positiveY(float2& uv) {
float3 pt;
pt.x = uv.x;
pt.y = 1;
pt.z = -uv.y;
return pt;
}
__device__ float3 negativeY(float2& uv) {
float3 pt;
pt.x = uv.x;
pt.y = -1;
pt.z = uv.y;
return pt;
}
__device__ float3 positiveZ(float2& uv) {
float3 pt;
pt.x = uv.x;
pt.y = -uv.y;
pt.z = -1;
return pt;
}
__device__ float3 negativeZ(float2& uv) {
float3 pt;
pt.x = -uv.x;
pt.y = -uv.y;
pt.z = 1;
return pt;
}
template <float3 (*project)(float2&), bool equiangular>
__global__ void remapMaskFace(unsigned char* __restrict__ face, int dstOffsetX, int dstOffsetY, int bbWidth,
int bbHeight, int panoWidth, int panoHeight, cudaTextureObject_t remapTex, int srcOffsetX,
int srcOffsetY, int faceDim, const float2 panoScale) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < bbWidth && y < bbHeight) {
/* compensate fetching offset with cudaFilterModeLinear by adding 0.5f */
float2 uv = make_float2(x + dstOffsetX + 0.5f, y + dstOffsetY + 0.5f);
uv = (uv / faceDim) * 2.f - make_float2(1.f, 1.f);
if (equiangular) {
uv.x = tanf_vs(uv.x * PI_F_VS / 4.);
uv.y = tanf_vs(uv.y * PI_F_VS / 4.);
}
float3 pt = project(uv);
float2 xy = SphereToErect(pt);
xy *= panoScale;
/**
* See notes in warp kernel
*/
xy.x += panoWidth / 2.0f;
xy.y += panoHeight / 2.0f;
xy.x -= srcOffsetX;
xy.y -= srcOffsetY;
if (xy.x < 0.) {
xy.x += panoWidth;
}
float px = tex2D<float>(remapTex, xy.x, xy.y);
face[y * bbWidth + x] = __float2uint_rn(px * 255.);
}
}
Status reprojectAlphaToCubemap(int panoWidth, int panoHeight, int faceLength, GPU::Surface& alphaSurface,
Rect equirectBB, GPU::Buffer<unsigned char> xPosAlpha, Rect xPosBB,
GPU::Buffer<unsigned char> xNegAlpha, Rect xNegBB, GPU::Buffer<unsigned char> yPosAlpha,
Rect yPosBB, GPU::Buffer<unsigned char> yNegAlpha, Rect yNegBB,
GPU::Buffer<unsigned char> zPosAlpha, Rect zPosBB, GPU::Buffer<unsigned char> zNegAlpha,
Rect zNegBB, bool equiangular, GPU::Stream stream) {
dim3 block(16, 16, 1);
float2 panoScale = {TransformGeoParams::computePanoScale(PanoProjection::Equirectangular, panoWidth, 360.f),
2 * TransformGeoParams::computePanoScale(PanoProjection::Equirectangular, panoHeight, 360.f)};
if (!xPosBB.empty()) {
dim3 gridXPos((unsigned)Cuda::ceilDiv(xPosBB.getWidth(), block.x),
(unsigned)Cuda::ceilDiv(xPosBB.getHeight(), block.y), 1);
if (equiangular) {
remapMaskFace<positiveX, true><<<gridXPos, block, 0, stream.get()>>>(
xPosAlpha.get().raw(), (unsigned)xPosBB.left(), (unsigned)xPosBB.top(), (unsigned)xPosBB.getWidth(),
(unsigned)xPosBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
} else {
remapMaskFace<positiveX, false><<<gridXPos, block, 0, stream.get()>>>(
xPosAlpha.get().raw(), (unsigned)xPosBB.left(), (unsigned)xPosBB.top(), (unsigned)xPosBB.getWidth(),
(unsigned)xPosBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
}
}
if (!xNegBB.empty()) {
dim3 gridXNeg((unsigned)Cuda::ceilDiv(xNegBB.getWidth(), block.x),
(unsigned)Cuda::ceilDiv(xNegBB.getHeight(), block.y), 1);
if (equiangular) {
remapMaskFace<negativeX, true><<<gridXNeg, block, 0, stream.get()>>>(
xNegAlpha.get().raw(), (unsigned)xNegBB.left(), (unsigned)xNegBB.top(), (unsigned)xNegBB.getWidth(),
(unsigned)xNegBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
} else {
remapMaskFace<negativeX, false><<<gridXNeg, block, 0, stream.get()>>>(
xNegAlpha.get().raw(), (unsigned)xNegBB.left(), (unsigned)xNegBB.top(), (unsigned)xNegBB.getWidth(),
(unsigned)xNegBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
}
}
if (!yPosBB.empty()) {
dim3 gridYPos((unsigned)Cuda::ceilDiv(yPosBB.getWidth(), block.x),
(unsigned)Cuda::ceilDiv(yPosBB.getHeight(), block.y), 1);
if (equiangular) {
remapMaskFace<positiveY, true><<<gridYPos, block, 0, stream.get()>>>(
yPosAlpha.get().raw(), (unsigned)yPosBB.left(), (unsigned)yPosBB.top(), (unsigned)yPosBB.getWidth(),
(unsigned)yPosBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
} else {
remapMaskFace<positiveY, false><<<gridYPos, block, 0, stream.get()>>>(
yPosAlpha.get().raw(), (unsigned)yPosBB.left(), (unsigned)yPosBB.top(), (unsigned)yPosBB.getWidth(),
(unsigned)yPosBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
}
}
if (!yNegBB.empty()) {
dim3 gridYNeg((unsigned)Cuda::ceilDiv(yNegBB.getWidth(), block.x),
(unsigned)Cuda::ceilDiv(yNegBB.getHeight(), block.y), 1);
if (equiangular) {
remapMaskFace<negativeY, true><<<gridYNeg, block, 0, stream.get()>>>(
yNegAlpha.get().raw(), (unsigned)yNegBB.left(), (unsigned)yNegBB.top(), (unsigned)yNegBB.getWidth(),
(unsigned)yNegBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
} else {
remapMaskFace<negativeY, false><<<gridYNeg, block, 0, stream.get()>>>(
yNegAlpha.get().raw(), (unsigned)yNegBB.left(), (unsigned)yNegBB.top(), (unsigned)yNegBB.getWidth(),
(unsigned)yNegBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
}
}
if (!zPosBB.empty()) {
dim3 gridZPos((unsigned)Cuda::ceilDiv(zPosBB.getWidth(), block.x),
(unsigned)Cuda::ceilDiv(zPosBB.getHeight(), block.y), 1);
if (equiangular) {
remapMaskFace<positiveZ, true><<<gridZPos, block, 0, stream.get()>>>(
zPosAlpha.get().raw(), (unsigned)zPosBB.left(), (unsigned)zPosBB.top(), (unsigned)zPosBB.getWidth(),
(unsigned)zPosBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
} else {
remapMaskFace<positiveZ, false><<<gridZPos, block, 0, stream.get()>>>(
zPosAlpha.get().raw(), (unsigned)zPosBB.left(), (unsigned)zPosBB.top(), (unsigned)zPosBB.getWidth(),
(unsigned)zPosBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
}
}
if (!zNegBB.empty()) {
dim3 gridZNeg((unsigned)Cuda::ceilDiv(zNegBB.getWidth(), block.x),
(unsigned)Cuda::ceilDiv(zNegBB.getHeight(), block.y), 1);
if (equiangular) {
remapMaskFace<negativeZ, true><<<gridZNeg, block, 0, stream.get()>>>(
zNegAlpha.get().raw(), (unsigned)zNegBB.left(), (unsigned)zNegBB.top(), (unsigned)zNegBB.getWidth(),
(unsigned)zNegBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
} else {
remapMaskFace<negativeZ, false><<<gridZNeg, block, 0, stream.get()>>>(
zNegAlpha.get().raw(), (unsigned)zNegBB.left(), (unsigned)zNegBB.top(), (unsigned)zNegBB.getWidth(),
(unsigned)zNegBB.getHeight(), (unsigned)panoWidth, (unsigned)panoHeight, alphaSurface.get().texture(),
(unsigned)equirectBB.left(), (unsigned)equirectBB.top(), (unsigned)faceLength, panoScale);
}
}
return CUDA_STATUS;
}
template <Convert2D3DFnT toSphere, class OutputCropper>
Status reprojectPanorama(GPU::Buffer<uint32_t> pbo, float2 dstScale, GPU::Surface& tex, float2 srcScale, unsigned width,
unsigned height, const Matrix33<double>& perspective, GPU::Stream stream) {
dim3 dimBlock(16, 16, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
vsfloat3x3 rotation;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
rotation.values[i][j] = (float)perspective(i, j);
}
}
remapKernel<toSphere, OutputCropper><<<dimGrid, dimBlock, 0, stream.get()>>>(pbo.get(), tex.get().texture(), width,
height, srcScale, dstScale, rotation);
return CUDA_STATUS;
}
Status reprojectRectilinear(GPU::Buffer<uint32_t> pbo, float2 outScale, GPU::Surface& tex, float2 inScale,
unsigned width, unsigned height, const Matrix33<double>& perspective, GPU::Stream stream) {
return reprojectPanorama<RectToSphere, OutputRectCropper>(pbo, outScale, tex, inScale, width, height, perspective,
stream);
}
Status reprojectEquirectangular(GPU::Buffer<uint32_t> pbo, float2 outScale, GPU::Surface& tex, float2 inScale,
unsigned width, unsigned height, const Matrix33<double>& perspective,
GPU::Stream stream) {
return reprojectPanorama<ErectToSphere, OutputRectCropper>(pbo, outScale, tex, inScale, width, height, perspective,
stream);
}
Status reprojectFullFrameFisheye(GPU::Buffer<uint32_t> pbo, float2 outScale, GPU::Surface& tex, float2 inScale,
unsigned width, unsigned height, const Matrix33<double>& perspective,
GPU::Stream stream) {
return reprojectPanorama<FisheyeToSphere, OutputRectCropper>(pbo, outScale, tex, inScale, width, height, perspective,
stream);
}
Status reprojectCircularFisheye(GPU::Buffer<uint32_t> pbo, float2 outScale, GPU::Surface& tex, float2 inScale,
unsigned width, unsigned height, const Matrix33<double>& perspective,
GPU::Stream stream) {
return reprojectPanorama<FisheyeToSphere, OutputCircleCropper>(pbo, outScale, tex, inScale, width, height,
perspective, stream);
}
Status reprojectStereographic(GPU::Buffer<uint32_t> pbo, float2 outScale, GPU::Surface& tex, float2 inScale,
unsigned width, unsigned height, const Matrix33<double>& perspective,
GPU::Stream stream) {
return reprojectPanorama<StereoToSphere, OutputRectCropper>(pbo, outScale, tex, inScale, width, height, perspective,
stream);
}
} // namespace Core
} // namespace VideoStitch
// TODO: make it less vague (use a proper include path)
#include "../gpu/kmeans/kmeans_centroids.h"
#include <thrust/host_vector.h>
TEST(KMeansCentroids, CalculateCentroids) {
// GIVEN
int k = 2;
int d = 2;
int n = 4;
// Setup data
thrust::host_vector<float> dataHost(n * d);
dataHost[0] = 0.0f;
dataHost[1] = 0.0f; // [0,0]
dataHost[2] = 0.0f;
dataHost[3] = 1.0f; // [0,1]
dataHost[4] = 1.0f;
dataHost[5] = 1.0f; // [1,1]
dataHost[6] = 1.0f;
dataHost[7] = 0.0f; // [1,0]
thrust::device_vector<float> dataDevice(n * d);
dataDevice = dataHost;
// Setup counts
thrust::device_vector<int> countsDevice(k);
countsDevice[0] = 0;
countsDevice[1] = 0;
// Setup labels
thrust::host_vector<int> labelsHost(n);
labelsHost[0] = 0; // label for [0,0] -> 0
labelsHost[1] = 0; // label for [0,1] -> 0
labelsHost[2] = 1; // label for [1,1] -> 1
labelsHost[3] = 1; // label for [1,0] -> 1
thrust::device_vector<int> labelsDevice(n);
labelsDevice = labelsHost;
// Setup indices
thrust::host_vector<int> indicesHost(n);
indicesHost[0] = 0;
indicesHost[1] = 1;
indicesHost[2] = 2;
indicesHost[3] = 3;
thrust::device_vector<int> indicesDevice(n);
indicesDevice = indicesHost;
// Setup centroids
thrust::host_vector<float> centroidsHost(d * k);
centroidsHost[0] = 0.0f;
centroidsHost[1] = 0.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
thrust::device_vector<float> centroidsDevice(d * k);
centroidsDevice = centroidsHost;
int n_threads_x = 64;
int n_threads_y = 16;
kmeans::detail::
calculate_centroids<<<dim3(1, 30), dim3(n_threads_x, n_threads_y), 0>>>(
n, d, k, thrust::raw_pointer_cast(dataDevice.data()),
thrust::raw_pointer_cast(labelsDevice.data()),
thrust::raw_pointer_cast(indicesDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
// THEN
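// Expected result, assuming calculate_centroids accumulates unscaled per-cluster
// sums (division by the counts is done separately, see the CentroidsScaling test):
//   cluster 0: [0,0] + [0,1] = (0, 1)
//   cluster 1: [1,1] + [1,0] = (2, 1)
// stored row-major per cluster as {0, 1, 2, 1}, which is what the asserts check.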
centroidsHost = centroidsDevice;
ASSERT_FLOAT_EQ(0.0f, centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(1.0f, centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(1.0f, centroidsHost.data()[3]);
SUCCEED();
}
// Calculating centroids "on 2 GPUs" should yield the same result as
// calculating centroids on 1 GPU from all data
TEST(KMeansCentroids, CalculateCentroids2GPU) {
/**
* CALCULATE CENTROIDS IN 2 TURNS, EACH TIME FROM HALF THE DATA
* */
// GIVEN
int k = 2;
int d = 2;
int n = 3;
thrust::host_vector<float> dataHost(n * d);
thrust::device_vector<float> dataDevice(n * d);
thrust::device_vector<int> countsDevice(k);
thrust::host_vector<int> labelsHost(n);
thrust::device_vector<int> labelsDevice(n);
thrust::host_vector<float> centroidsHost(d * k);
thrust::device_vector<float> centroidsDevice(d * k);
thrust::host_vector<float> centroidsHostFirst(d * k);
thrust::host_vector<int> indicesHost(n);
thrust::device_vector<int> indicesDevice(n);
int n_threads_x = 64;
int n_threads_y = 16;
indicesHost[0] = 0;
indicesHost[1] = 1;
indicesHost[2] = 2;
dataHost[0] = 4.0f;
dataHost[1] = 2.0f; // [4,2]
dataHost[2] = 1.0f;
dataHost[3] = 0.0f; // [1,0]
dataHost[4] = 4.0f;
dataHost[5] = 0.0f; // [4,0]
countsDevice[0] = 0;
countsDevice[1] = 0;
labelsHost[0] = 0;
labelsHost[1] = 0;
labelsHost[2] = 0;
centroidsHost[0] = 0.0f;
centroidsHost[1] = 0.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
indicesDevice = indicesHost;
dataDevice = dataHost;
labelsDevice = labelsHost;
centroidsDevice = centroidsHost;
// Run on "gpu1"
kmeans::detail::
calculate_centroids<<<dim3(1, 30), dim3(n_threads_x, n_threads_y), 0>>>(
n, d, k, thrust::raw_pointer_cast(dataDevice.data()),
thrust::raw_pointer_cast(labelsDevice.data()),
thrust::raw_pointer_cast(indicesDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
centroidsHostFirst = centroidsDevice;
// Setup data for "gpu2"
dataHost[0] = 4.0f;
dataHost[1] = 4.0f; // [4,4]
dataHost[2] = 1.0f;
dataHost[3] = 4.0f; // [1,4]
dataHost[4] = 1.0f;
dataHost[5] = 2.0f; // [1,2]
countsDevice[0] = 0;
countsDevice[1] = 0;
labelsHost[0] = 0;
labelsHost[1] = 1;
labelsHost[2] = 1;
centroidsHost[0] = 0.0f;
centroidsHost[1] = 0.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
dataDevice = dataHost;
labelsDevice = labelsHost;
centroidsDevice = centroidsHost;
kmeans::detail::memzero(countsDevice);
kmeans::detail::memzero(centroidsDevice);
// Run on "gpu2"
kmeans::detail::
calculate_centroids<<<dim3(1, 30), dim3(n_threads_x, n_threads_y), 0>>>(
n, d, k, thrust::raw_pointer_cast(dataDevice.data()),
thrust::raw_pointer_cast(labelsDevice.data()),
thrust::raw_pointer_cast(indicesDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
centroidsHost = centroidsDevice;
centroidsHost.data()[0] += centroidsHostFirst.data()[0];
centroidsHost.data()[1] += centroidsHostFirst.data()[1];
centroidsHost.data()[2] += centroidsHostFirst.data()[2];
centroidsHost.data()[3] += centroidsHostFirst.data()[3];
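// Per-cluster sums are additive across data shards, so adding the two partial
// results component-wise should equal a single pass over all six points
// (cluster 0: (13, 6), cluster 1: (2, 6)); that is verified against
// centroidsHost2 below.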
/**
* CALCULATE CENTROIDS IN 1 TURN, FROM ALL DATA
* */
k = 2;
d = 2;
n = 6;
// Setup data
thrust::host_vector<float> dataHost2(n * d);
dataHost2[0] = 4.0f;
dataHost2[1] = 2.0f; // [4,2]
dataHost2[2] = 1.0f;
dataHost2[3] = 0.0f; // [1,0]
dataHost2[4] = 4.0f;
dataHost2[5] = 0.0f; // [4,0]
dataHost2[6] = 4.0f;
dataHost2[7] = 4.0f; // [4,4]
dataHost2[8] = 1.0f;
dataHost2[9] = 4.0f; // [1,4]
dataHost2[10] = 1.0f;
dataHost2[11] = 2.0f; // [1,2]
thrust::device_vector<float> dataDevice2(n * d);
dataDevice2 = dataHost2;
// Setup counts
thrust::device_vector<int> countsDevice2(k);
// Setup labels
thrust::host_vector<int> labelsHost2(n);
labelsHost2[0] = 0;
labelsHost2[1] = 0;
labelsHost2[2] = 0;
labelsHost2[3] = 0;
labelsHost2[4] = 1;
labelsHost2[5] = 1;
thrust::device_vector<int> labelsDevice2(n);
labelsDevice2 = labelsHost2;
// Setup indices
thrust::host_vector<int> indicesHost2(n);
indicesHost2[0] = 0;
indicesHost2[1] = 1;
indicesHost2[2] = 2;
indicesHost2[3] = 3;
indicesHost2[4] = 4;
indicesHost2[5] = 5;
thrust::device_vector<int> indicesDevice2(n);
indicesDevice2 = indicesHost2;
// Setup centroids
thrust::device_vector<float> centroidsDevice2(d * k);
kmeans::detail::memzero(countsDevice2);
kmeans::detail::memzero(centroidsDevice2);
kmeans::detail::
calculate_centroids<<<dim3(1, 30), dim3(n_threads_x, n_threads_y), 0>>>(
n, d, k, thrust::raw_pointer_cast(dataDevice2.data()),
thrust::raw_pointer_cast(labelsDevice2.data()),
thrust::raw_pointer_cast(indicesDevice2.data()),
thrust::raw_pointer_cast(centroidsDevice2.data()),
thrust::raw_pointer_cast(countsDevice2.data()));
// THEN
thrust::host_vector<float> centroidsHost2(d * k);
centroidsHost2 = centroidsDevice2;
ASSERT_FLOAT_EQ(centroidsHost2.data()[0], centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(centroidsHost2.data()[1], centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(centroidsHost2.data()[2], centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(centroidsHost2.data()[3], centroidsHost.data()[3]);
SUCCEED();
}
TEST(KMeansCentroids, RevertCentroidZeroing) {
// GIVEN
int k = 3;
int d = 2;
// Setup counts
thrust::host_vector<int> countsHost(k);
countsHost[0] = 1;
countsHost[1] = 0;
countsHost[2] = 1;
thrust::device_vector<int> countsDevice(k);
countsDevice = countsHost;
// Setup tmp centroids (original)
thrust::host_vector<float> tmp_centroidsHost(d * k);
tmp_centroidsHost[0] = 1.0f;
tmp_centroidsHost[1] = 1.0f;
tmp_centroidsHost[2] = 2.0f;
tmp_centroidsHost[3] = 2.0f;
tmp_centroidsHost[4] = 3.0f;
tmp_centroidsHost[5] = 3.0f;
thrust::device_vector<float> tmp_centroidsDevice(d * k);
tmp_centroidsDevice = tmp_centroidsHost;
// Setup centroids
thrust::host_vector<float> centroidsHost(d * k);
centroidsHost[0] = 5.0f;
centroidsHost[1] = 5.0f;
centroidsHost[2] = 0.0f;
centroidsHost[3] = 0.0f;
centroidsHost[4] = 4.0f;
centroidsHost[5] = 4.0f;
thrust::device_vector<float> centroidsDevice(d * k);
centroidsDevice = centroidsHost;
// WHEN
kmeans::detail::revert_zeroed_centroids<<<
dim3((d - 1) / 32 + 1, (k - 1) / 32 + 1), dim3(32, 32), 0>>>(
d, k, thrust::raw_pointer_cast(tmp_centroidsDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()),
thrust::raw_pointer_cast(countsDevice.data()));
// THEN
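// Expected behaviour: clusters whose count is zero (cluster 1 here) appear to be
// reverted to their previous values from tmp_centroids ([2,2]), while clusters
// with a non-zero count keep the freshly computed values ([5,5] and [4,4]).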
centroidsHost = centroidsDevice;
ASSERT_FLOAT_EQ(5.0f, centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(5.0f, centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[3]);
ASSERT_FLOAT_EQ(4.0f, centroidsHost.data()[4]);
ASSERT_FLOAT_EQ(4.0f, centroidsHost.data()[5]);
SUCCEED();
}
TEST(KMeansCentroids, CentroidsScaling) {
// GIVEN
int k = 2;
int d = 2;
// Setup counts
thrust::host_vector<int> countsHost(k);
countsHost[0] = 4;
countsHost[1] = 2;
thrust::device_vector<int> countsDevice(k);
countsDevice = countsHost;
// Setup centroids
thrust::host_vector<float> centroidsHost(d * k);
centroidsHost[0] = 1.0f;
centroidsHost[1] = 2.0f;
centroidsHost[2] = 3.0f;
centroidsHost[3] = 4.0f;
thrust::device_vector<float> centroidsDevice(d * k);
centroidsDevice = centroidsHost;
// WHEN
kmeans::detail::scale_centroids<<<dim3((d - 1) / 32 + 1, (k - 1) / 32 + 1),
dim3(32, 32), 0>>>(
d, k, thrust::raw_pointer_cast(countsDevice.data()),
thrust::raw_pointer_cast(centroidsDevice.data()));
// THEN
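// Expected (assuming scale_centroids divides each centroid dimension by its
// cluster count):
//   cluster 0: [1, 2] / 4 = [0.25, 0.5]
//   cluster 1: [3, 4] / 2 = [1.5,  2.0]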
centroidsHost = centroidsDevice;
ASSERT_FLOAT_EQ(0.25f, centroidsHost.data()[0]);
ASSERT_FLOAT_EQ(0.5f, centroidsHost.data()[1]);
ASSERT_FLOAT_EQ(1.5f, centroidsHost.data()[2]);
ASSERT_FLOAT_EQ(2.0f, centroidsHost.data()[3]);
SUCCEED();
}
__host__ __device__ int64_t LOOP_COUNT(uint32_t bits, test_t test) {
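// The iteration counts scale inversely with the operand size so every test does a
// comparable amount of work regardless of bit length, e.g. at 1024 bits:
//   xt_add:      1000*8192/1024 = 8000 loops per instance
//   xt_powm_odd:      8192/1024 =    8 loops per instance
// (assuming these counts match the per-instance loops in xmp_tests.cu).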
if(test==xt_add)
return 1000*8192/bits;
else if(test==xt_sub)
return 1000*8192/bits;
else if(test==xt_accumulate)
return 1000*8192/bits;
else if(test==xt_mul)
return 100*8192/bits;
else if(test==xt_div_qr)
return 40*8192/bits;
else if(test==xt_sqrt)
return 40*8192/bits;
else if(test==xt_powm_odd)
return 8192/bits;
else if(test==xt_mont_reduce)
return 100*8192/bits;
else if(test==xt_gcd)
return 10*8192/bits;
else if(test==xt_modinv)
return 10*8192/bits;
else
return 0;
}
void from_mpz(uint32_t *words, uint32_t count, mpz_t value) {
size_t written;
if(mpz_sizeinbase(value, 2)>count*32) {
fprintf(stderr, "from_mpz failed -- result does not fit\n");
exit(1);
}
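// mpz_export arguments: order = -1 (least significant word first), size =
// sizeof(uint32_t) bytes per word, endian = 0 (native byte order within each
// word), nails = 0 (use all 32 bits); any words GMP does not write are then
// zero-padded so the full count*32-bit buffer is defined.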
mpz_export(words, &written, -1, sizeof(uint32_t), 0, 0, value);
while(written<count)
words[written++]=0;
}
template<uint32_t tpi, uint32_t bits>
class xmp_tester {
public:
typedef struct {
cgbn_mem_t<bits> x0, x1, x2;
cgbn_mem_t<bits> o0, o1;
cgbn_mem_t<2*bits> w0, w1;
cgbn_mem_t<bits> r;
} x_instance_t;
typedef cgbn_context_t<tpi> context_t;
typedef cgbn_env_t<context_t, bits> env_t;
typedef typename env_t::cgbn_t bn_t;
typedef typename env_t::cgbn_local_t bn_local_t;
typedef typename env_t::cgbn_wide_t bn_wide_t;
typedef typename env_t::cgbn_accumulator_t bn_accumulator_t;
context_t _context;
env_t _env;
int32_t _instance;
__device__ __forceinline__ xmp_tester(cgbn_monitor_t monitor, cgbn_error_report_t *report, int32_t instance) : _context(monitor, report, (uint32_t)instance), _env(_context), _instance(instance) {
}
static __host__ x_instance_t *x_generate_instances(gmp_randstate_t state, uint32_t count) {
x_instance_t *instances=(x_instance_t *)malloc(sizeof(x_instance_t)*count);
mpz_t value;
mpz_init(value);
for(int index=0;index<count;index++) {
mpz_urandomb(value, state, bits);
from_mpz(instances[index].x0._limbs, bits/32, value);
mpz_urandomb(value, state, bits);
from_mpz(instances[index].x1._limbs, bits/32, value);
mpz_urandomb(value, state, bits);
from_mpz(instances[index].x2._limbs, bits/32, value);
mpz_urandomb(value, state, bits);
mpz_setbit(value, 0);
from_mpz(instances[index].o0._limbs, bits/32, value);
mpz_urandomb(value, state, bits);
mpz_setbit(value, 0);
from_mpz(instances[index].o1._limbs, bits/32, value);
mpz_urandomb(value, state, 2*bits);
from_mpz(instances[index].w0._limbs, bits*2/32, value);
mpz_urandomb(value, state, 2*bits);
from_mpz(instances[index].w1._limbs, bits*2/32, value);
}
mpz_clear(value);
return instances;
}
__device__ __forceinline__ void x_test_add(x_instance_t *instances);
__device__ __forceinline__ void x_test_sub(x_instance_t *instances);
__device__ __forceinline__ void x_test_accumulate(x_instance_t *instances);
__device__ __forceinline__ void x_test_mul(x_instance_t *instances);
__device__ __forceinline__ void x_test_div_qr(x_instance_t *instances);
__device__ __forceinline__ void x_test_sqrt(x_instance_t *instances);
__device__ __forceinline__ void x_test_powm_odd(x_instance_t *instances);
__device__ __forceinline__ void x_test_mont_reduce(x_instance_t *instances);
__device__ __forceinline__ void x_test_gcd(x_instance_t *instances);
__device__ __forceinline__ void x_test_modinv(x_instance_t *instances);
};
#include "xmp_tests.cu"
#include "xmp_test_powm.cu"
template<uint32_t tpi, uint32_t bits>
void x_run_test(test_t operation, typename xmp_tester<tpi, bits>::x_instance_t *instances, uint32_t count) {
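// IPB = instances per 128-thread block (each instance occupies tpi threads);
// blocks = (count+IPB-1)*tpi/threads, i.e. ceil(count/IPB). For example with
// tpi=32 and the default 200000 instances: IPB = 128/32 = 4, blocks = 50000.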
int threads=128, IPB=threads/tpi, blocks=(count+IPB-1)*tpi/threads;
if(operation==xt_add)
x_test_add_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else if(operation==xt_sub)
x_test_sub_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else if(operation==xt_accumulate)
x_test_accumulate_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else if(operation==xt_mul)
x_test_mul_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else if(operation==xt_div_qr)
x_test_div_qr_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else if(operation==xt_sqrt)
x_test_sqrt_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else if(operation==xt_powm_odd)
x_test_powm_odd_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else if(operation==xt_mont_reduce)
x_test_mont_reduce_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else if(operation==xt_gcd)
x_test_gcd_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else if(operation==xt_modinv)
x_test_modinv_kernel<tpi, bits><<<blocks, threads>>>(instances, count);
else {
printf("Unsupported operation -- needs to be added to x_run_test<...> in xmp_tester.cu\n");
exit(1);
}
}
template<uint32_t tpi, uint32_t bits>
void x_run_test(stats_t *stats, test_t operation, void *instances, uint32_t count, uint32_t repetitions) {
typedef typename xmp_tester<tpi, bits>::x_instance_t x_instance_t;
x_instance_t *gpuInstances;
cudaEvent_t start, stop;
float time;
double total=0;
CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(x_instance_t)*count));
CUDA_CHECK(cudaMemcpy(gpuInstances, instances, sizeof(x_instance_t)*count, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaEventCreate(&start));
CUDA_CHECK(cudaEventCreate(&stop));
stats->operation=operation;
stats->tpi=tpi;
stats->size=bits;
printf(" ms:");
// warm up run
x_run_test<tpi, bits>(operation, (x_instance_t *)gpuInstances, count);
for(int32_t run=0;run<repetitions;run++) {
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaEventRecord(start, 0));
x_run_test<tpi, bits>(operation, (x_instance_t *)gpuInstances, count);
CUDA_CHECK(cudaEventRecord(stop, 0));
CUDA_CHECK(cudaEventSynchronize(stop));
CUDA_CHECK(cudaEventElapsedTime(&time, start, stop));
printf(" %0.3f", time);
fflush(stdout);
total=total+time;
}
printf("\n");
total=total/1000.0;
CUDA_CHECK(cudaFree(gpuInstances));
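// Reported throughput is operations per second: instances = count * LOOP_COUNT
// (presumably each kernel instance repeats the operation LOOP_COUNT times in
// xmp_tests.cu) and time is the average number of seconds per run.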
stats->instances=((int64_t)count)*LOOP_COUNT(bits, operation);
stats->time=total/(double)repetitions;
stats->throughput=stats->instances/stats->time;
stats->next=NULL;
}
bool x_supported_size(uint32_t size) {
return size==128 || size==256 || size==512 ||
size==1024 || size==2048 || size==3072 || size==4096 ||
size==5120 || size==6144 || size==7168 || size==8192;
}
bool x_supported_tpi_size(uint32_t tpi, uint32_t size) {
if(size==128 && tpi==4)
return true;
else if(size==256 && (tpi==4 || tpi==8))
return true;
else if(size==512 && (tpi==4 || tpi==8 || tpi==16))
return true;
else if(size==1024 && (tpi==8 || tpi==16 || tpi==32))
return true;
else if(size==2048 && (tpi==8 || tpi==16 || tpi==32))
return true;
else if(size==3072 && (tpi==16 || tpi==32))
return true;
else if(size==4096 && (tpi==16 || tpi==32))
return true;
else if(size==5120 && tpi==32)
return true;
else if(size==6144 && tpi==32)
return true;
else if(size==7168 && tpi==32)
return true;
else if(size==8192 && tpi==32)
return true;
return false;
}
void x_run_test(stats_t *stats, test_t operation, uint32_t tpi, uint32_t size, void *instances, uint32_t count, uint32_t repetitions) {
if(!x_supported_tpi_size(tpi, size)) {
printf("Unsupported tpi and size -- needs to be added to x_run_test in xmp_tester.cu\n");
exit(1);
}
if(tpi==4 && size==128)
x_run_test<4, 128>(stats, operation, instances, count, repetitions);
else if(tpi==4 && size==256)
x_run_test<4, 256>(stats, operation, instances, count, repetitions);
else if(tpi==8 && size==256)
x_run_test<8, 256>(stats, operation, instances, count, repetitions);
else if(tpi==4 && size==512)
x_run_test<4, 512>(stats, operation, instances, count, repetitions);
else if(tpi==8 && size==512)
x_run_test<8, 512>(stats, operation, instances, count, repetitions);
else if(tpi==16 && size==512)
x_run_test<16, 512>(stats, operation, instances, count, repetitions);
else if(tpi==8 && size==1024)
x_run_test<8, 1024>(stats, operation, instances, count, repetitions);
else if(tpi==16 && size==1024)
x_run_test<16, 1024>(stats, operation, instances, count, repetitions);
else if(tpi==32 && size==1024)
x_run_test<32, 1024>(stats, operation, instances, count, repetitions);
else if(tpi==8 && size==2048)
x_run_test<8, 2048>(stats, operation, instances, count, repetitions);
else if(tpi==16 && size==2048)
x_run_test<16, 2048>(stats, operation, instances, count, repetitions);
else if(tpi==32 && size==2048)
x_run_test<32, 2048>(stats, operation, instances, count, repetitions);
else if(tpi==16 && size==3072)
x_run_test<16, 3072>(stats, operation, instances, count, repetitions);
else if(tpi==32 && size==3072)
x_run_test<32, 3072>(stats, operation, instances, count, repetitions);
else if(tpi==16 && size==4096)
x_run_test<16, 4096>(stats, operation, instances, count, repetitions);
else if(tpi==32 && size==4096)
x_run_test<32, 4096>(stats, operation, instances, count, repetitions);
else if(tpi==32 && size==5120)
x_run_test<32, 5120>(stats, operation, instances, count, repetitions);
else if(tpi==32 && size==6144)
x_run_test<32, 6144>(stats, operation, instances, count, repetitions);
else if(tpi==32 && size==7168)
x_run_test<32, 7168>(stats, operation, instances, count, repetitions);
else if(tpi==32 && size==8192)
x_run_test<32, 8192>(stats, operation, instances, count, repetitions);
else {
printf("internal error -- tpi/size -- needs to be added to x_run_test in xmp_tester.cu\n");
exit(1);
}
}
void *x_generate_data(gmp_randstate_t state, uint32_t tpi, uint32_t size, uint32_t count) {
if(size==128)
return (void *)xmp_tester<32, 128>::x_generate_instances(state, count);
else if(size==256)
return (void *)xmp_tester<32, 256>::x_generate_instances(state, count);
else if(size==512)
return (void *)xmp_tester<32, 512>::x_generate_instances(state, count);
else if(size==1024)
return (void *)xmp_tester<32, 1024>::x_generate_instances(state, count);
else if(size==2048)
return (void *)xmp_tester<32, 2048>::x_generate_instances(state, count);
else if(size==3072)
return (void *)xmp_tester<32, 3072>::x_generate_instances(state, count);
else if(size==4096)
return (void *)xmp_tester<32, 4096>::x_generate_instances(state, count);
else if(size==5120)
return (void *)xmp_tester<32, 5120>::x_generate_instances(state, count);
else if(size==6144)
return (void *)xmp_tester<32, 6144>::x_generate_instances(state, count);
else if(size==7168)
return (void *)xmp_tester<32, 7168>::x_generate_instances(state, count);
else if(size==8192)
return (void *)xmp_tester<32, 8192>::x_generate_instances(state, count);
else {
printf("Unsupported size -- needs to be added to x_generate_data in xmp_tester.cu\n");
exit(1);
}
}
void x_free_data(void *data, uint32_t count) {
free(data);
}
#ifndef SIZE
#define SIZE 1024
#endif
#ifndef INSTANCES
#define INSTANCES 200000
#endif
#ifndef MAX_SIZES
#define MAX_SIZES 25
#endif
#ifndef MAX_TPIS
#define MAX_TPIS 4
#endif
#ifndef RUNS
#define RUNS 5
#endif
int main(int argc, const char *argv[]) {
gmp_randstate_t state;
void *data;
bool tests[XT_LAST-XT_FIRST+1];
int sizes[MAX_SIZES];
int tpis[MAX_TPIS];
bool all_tests=true;
int sizes_count=0, tpis_count=0;
stats_t *chain, *last, *stats;
gmp_randinit_default(state);
for(int index=XT_FIRST;index<=XT_LAST;index++)
tests[index-XT_FIRST]=0;
for(int index=0;index<MAX_SIZES;index++)
sizes[index]=0;
for(int index=1;index<argc;index++) {
test_t parse;
int size;
parse=xt_parse(argv[index]);
if(parse!=test_unknown) {
if(parse>=XT_FIRST && parse<=XT_LAST) {
tests[parse-XT_FIRST]=true;
all_tests=false;
}
else {
printf("test is only available for xmp\n");
exit(1);
}
}
else {
size=atoi(argv[index]);
if(size!=0)
if(size==4 || size==8 || size==16 || size==32)
tpis[tpis_count++]=size;
else
sizes[sizes_count++]=size;
else {
printf("invalid test/size: %s\n", argv[index]);
exit(1);
}
}
}
for(int i=0;i<sizes_count;i++)
for(int j=i+1;j<sizes_count;j++)
if(sizes[i]>sizes[j]) {
int s=sizes[i];
sizes[i]=sizes[j];
sizes[j]=s;
}
if(all_tests) {
for(int32_t testIndex=XT_FIRST;testIndex<=XT_LAST;testIndex++) {
test_t test=static_cast<test_t>(testIndex);
tests[test-XT_FIRST]=true;
}
}
if(tpis_count==0) {
tpis[tpis_count++]=4;
tpis[tpis_count++]=8;
tpis[tpis_count++]=16;
tpis[tpis_count++]=32;
}
if(sizes_count==0) {
sizes[sizes_count++]=128;
sizes[sizes_count++]=256;
sizes[sizes_count++]=512;
sizes[sizes_count++]=1024;
sizes[sizes_count++]=2048;
sizes[sizes_count++]=3072;
sizes[sizes_count++]=4096;
sizes[sizes_count++]=5120;
sizes[sizes_count++]=6144;
sizes[sizes_count++]=7168;
sizes[sizes_count++]=8192;
}
chain=NULL;
last=NULL;
for(int index=0;index<sizes_count;index++) {
if(!x_supported_size(sizes[index])) {
printf("... %d ... invalid test size ...\n", sizes[index]);
continue;
}
printf("... generating data ...\n");
data=x_generate_data(state, 32, sizes[index], INSTANCES);
for(int tpi_index=0;tpi_index<tpis_count;tpi_index++) {
for(int32_t testIndex=XT_FIRST;testIndex<=XT_LAST;testIndex++) {
test_t test=static_cast<test_t>(testIndex);
if(tests[test-XT_FIRST]) {
if(!x_supported_tpi_size(tpis[tpi_index], sizes[index]))
continue;
stats=(stats_t *)malloc(sizeof(stats_t));
printf("... %s %d:%d ... ", test_name(test), sizes[index], tpis[tpi_index]);
fflush(stdout);
x_run_test(stats, test, tpis[tpi_index], sizes[index], data, INSTANCES, RUNS);
if(chain==NULL)
chain=stats;
else
last->next=stats;
last=stats;
}
}
}
x_free_data(data, INSTANCES);
}
printf("Done...\n");
FILE *report=fopen("gpu_throughput_report.csv", "w");
if(report==NULL) {
printf("Unable to open \"gpu_throughput_report.csv\" in the local directory for writing\n");
exit(1);
}
else {
printf("Generating \"gpu_throughput_report.csv\"");
stats_report(report, false, chain, tests, XT_FIRST, XT_LAST, sizes, sizes_count);
fclose(report);
}
printf("\n\n");
stats_report(stdout, true, chain, tests, XT_FIRST, XT_LAST, sizes, sizes_count);
printf("\n");
}
#define TPB 256
static uint32_t *h_GNonces[MAX_GPUS];
static uint32_t *d_GNonces[MAX_GPUS];
__constant__ uint32_t pTarget[8];
__device__ static uint32_t T0up[] = {
0xA5F432C6, 0x84976FF8, 0x99B05EEE, 0x8D8C7AF6, 0x0D17E8FF, 0xBDDC0AD6, 0xB1C816DE, 0x54FC6D91, 0x50F09060, 0x03050702, 0xA9E02ECE, 0x7D87D156, 0x192BCCE7, 0x62A613B5, 0xE6317C4D, 0x9AB559EC,
0x45CF408F, 0x9DBCA31F, 0x40C04989, 0x879268FA, 0x153FD0EF, 0xEB2694B2, 0xC940CE8E, 0x0B1DE6FB, 0xEC2F6E41, 0x67A91AB3, 0xFD1C435F, 0xEA256045, 0xBFDAF923, 0xF7025153, 0x96A145E4, 0x5BED769B,
0xC25D2875, 0x1C24C5E1, 0xAEE9D43D, 0x6ABEF24C, 0x5AEE826C, 0x41C3BD7E, 0x0206F3F5, 0x4FD15283, 0x5CE48C68, 0xF4075651, 0x345C8DD1, 0x0818E1F9, 0x93AE4CE2, 0x73953EAB, 0x53F59762, 0x3F416B2A,
0x0C141C08, 0x52F66395, 0x65AFE946, 0x5EE27F9D, 0x28784830, 0xA1F8CF37, 0x0F111B0A, 0xB5C4EB2F, 0x091B150E, 0x365A7E24, 0x9BB6AD1B, 0x3D4798DF, 0x266AA7CD, 0x69BBF54E, 0xCD4C337F, 0x9FBA50EA,
0x1B2D3F12, 0x9EB9A41D, 0x749CC458, 0x2E724634, 0x2D774136, 0xB2CD11DC, 0xEE299DB4, 0xFB164D5B, 0xF601A5A4, 0x4DD7A176, 0x61A314B7, 0xCE49347D, 0x7B8DDF52, 0x3E429FDD, 0x7193CD5E, 0x97A2B113,
0xF504A2A6, 0x68B801B9, 0x00000000, 0x2C74B5C1, 0x60A0E040, 0x1F21C2E3, 0xC8433A79, 0xED2C9AB6, 0xBED90DD4, 0x46CA478D, 0xD9701767, 0x4BDDAF72, 0xDE79ED94, 0xD467FF98, 0xE82393B0, 0x4ADE5B85,
0x6BBD06BB, 0x2A7EBBC5, 0xE5347B4F, 0x163AD7ED, 0xC554D286, 0xD762F89A, 0x55FF9966, 0x94A7B611, 0xCF4AC08A, 0x1030D9E9, 0x060A0E04, 0x819866FE, 0xF00BABA0, 0x44CCB478, 0xBAD5F025, 0xE33E754B,
0xF30EACA2, 0xFE19445D, 0xC05BDB80, 0x8A858005, 0xADECD33F, 0xBCDFFE21, 0x48D8A870, 0x040CFDF1, 0xDF7A1963, 0xC1582F77, 0x759F30AF, 0x63A5E742, 0x30507020, 0x1A2ECBE5, 0x0E12EFFD, 0x6DB708BF,
0x4CD45581, 0x143C2418, 0x355F7926, 0x2F71B2C3, 0xE13886BE, 0xA2FDC835, 0xCC4FC788, 0x394B652E, 0x57F96A93, 0xF20D5855, 0x829D61FC, 0x47C9B37A, 0xACEF27C8, 0xE73288BA, 0x2B7D4F32, 0x95A442E6,
0xA0FB3BC0, 0x98B3AA19, 0xD168F69E, 0x7F8122A3, 0x66AAEE44, 0x7E82D654, 0xABE6DD3B, 0x839E950B, 0xCA45C98C, 0x297BBCC7, 0xD36E056B, 0x3C446C28, 0x798B2CA7, 0xE23D81BC, 0x1D273116, 0x769A37AD,
0x3B4D96DB, 0x56FA9E64, 0x4ED2A674, 0x1E223614, 0xDB76E492, 0x0A1E120C, 0x6CB4FC48, 0xE4378FB8, 0x5DE7789F, 0x6EB20FBD, 0xEF2A6943, 0xA6F135C4, 0xA8E3DA39, 0xA4F7C631, 0x37598AD3, 0x8B8674F2,
0x325683D5, 0x43C54E8B, 0x59EB856E, 0xB7C218DA, 0x8C8F8E01, 0x64AC1DB1, 0xD26DF19C, 0xE03B7249, 0xB4C71FD8, 0xFA15B9AC, 0x0709FAF3, 0x256FA0CF, 0xAFEA20CA, 0x8E897DF4, 0xE9206747, 0x18283810,
0xD5640B6F, 0x888373F0, 0x6FB1FB4A, 0x7296CA5C, 0x246C5438, 0xF1085F57, 0xC7522173, 0x51F36497, 0x2365AECB, 0x7C8425A1, 0x9CBF57E8, 0x21635D3E, 0xDD7CEA96, 0xDC7F1E61, 0x86919C0D, 0x85949B0F,
0x90AB4BE0, 0x42C6BA7C, 0xC4572671, 0xAAE529CC, 0xD873E390, 0x050F0906, 0x0103F4F7, 0x12362A1C, 0xA3FE3CC2, 0x5FE18B6A, 0xF910BEAE, 0xD06B0269, 0x91A8BF17, 0x58E87199, 0x2769533A, 0xB9D0F727,
0x384891D9, 0x1335DEEB, 0xB3CEE52B, 0x33557722, 0xBBD604D2, 0x709039A9, 0x89808707, 0xA7F2C133, 0xB6C1EC2D, 0x22665A3C, 0x92ADB815, 0x2060A9C9, 0x49DB5C87, 0xFF1AB0AA, 0x7888D850, 0x7A8E2BA5,
0x8F8A8903, 0xF8134A59, 0x809B9209, 0x1739231A, 0xDA751065, 0x315384D7, 0xC651D584, 0xB8D303D0, 0xC35EDC82, 0xB0CBE229, 0x7799C35A, 0x11332D1E, 0xCB463D7B, 0xFC1FB7A8, 0xD6610C6D, 0x3A4E622C
};
__device__ static uint32_t T0dn[] = {
0xC6A597F4, 0xF884EB97, 0xEE99C7B0, 0xF68DF78C, 0xFF0DE517, 0xD6BDB7DC, 0xDEB1A7C8, 0x915439FC, 0x6050C0F0, 0x02030405, 0xCEA987E0, 0x567DAC87, 0xE719D52B, 0xB56271A6, 0x4DE69A31, 0xEC9AC3B5,
0x8F4505CF, 0x1F9D3EBC, 0x894009C0, 0xFA87EF92, 0xEF15C53F, 0xB2EB7F26, 0x8EC90740, 0xFB0BED1D, 0x41EC822F, 0xB3677DA9, 0x5FFDBE1C, 0x45EA8A25, 0x23BF46DA, 0x53F7A602, 0xE496D3A1, 0x9B5B2DED,
0x75C2EA5D, 0xE11CD924, 0x3DAE7AE9, 0x4C6A98BE, 0x6C5AD8EE, 0x7E41FCC3, 0xF502F106, 0x834F1DD1, 0x685CD0E4, 0x51F4A207, 0xD134B95C, 0xF908E918, 0xE293DFAE, 0xAB734D95, 0x6253C4F5, 0x2A3F5441,
0x080C1014, 0x955231F6, 0x46658CAF, 0x9D5E21E2, 0x30286078, 0x37A16EF8, 0x0A0F1411, 0x2FB55EC4, 0x0E091C1B, 0x2436485A, 0x1B9B36B6, 0xDF3DA547, 0xCD26816A, 0x4E699CBB, 0x7FCDFE4C, 0xEA9FCFBA,
0x121B242D, 0x1D9E3AB9, 0x5874B09C, 0x342E6872, 0x362D6C77, 0xDCB2A3CD, 0xB4EE7329, 0x5BFBB616, 0xA4F65301, 0x764DECD7, 0xB76175A3, 0x7DCEFA49, 0x527BA48D, 0xDD3EA142, 0x5E71BC93, 0x139726A2,
0xA6F55704, 0xB96869B8, 0x00000000, 0xC12C9974, 0x406080A0, 0xE31FDD21, 0x79C8F243, 0xB6ED772C, 0xD4BEB3D9, 0x8D4601CA, 0x67D9CE70, 0x724BE4DD, 0x94DE3379, 0x98D42B67, 0xB0E87B23, 0x854A11DE,
0xBB6B6DBD, 0xC52A917E, 0x4FE59E34, 0xED16C13A, 0x86C51754, 0x9AD72F62, 0x6655CCFF, 0x119422A7, 0x8ACF0F4A, 0xE910C930, 0x0406080A, 0xFE81E798, 0xA0F05B0B, 0x7844F0CC, 0x25BA4AD5, 0x4BE3963E,
0xA2F35F0E, 0x5DFEBA19, 0x80C01B5B, 0x058A0A85, 0x3FAD7EEC, 0x21BC42DF, 0x7048E0D8, 0xF104F90C, 0x63DFC67A, 0x77C1EE58, 0xAF75459F, 0x426384A5, 0x20304050, 0xE51AD12E, 0xFD0EE112, 0xBF6D65B7,
0x814C19D4, 0x1814303C, 0x26354C5F, 0xC32F9D71, 0xBEE16738, 0x35A26AFD, 0x88CC0B4F, 0x2E395C4B, 0x93573DF9, 0x55F2AA0D, 0xFC82E39D, 0x7A47F4C9, 0xC8AC8BEF, 0xBAE76F32, 0x322B647D, 0xE695D7A4,
0xC0A09BFB, 0x199832B3, 0x9ED12768, 0xA37F5D81, 0x446688AA, 0x547EA882, 0x3BAB76E6, 0x0B83169E, 0x8CCA0345, 0xC729957B, 0x6BD3D66E, 0x283C5044, 0xA779558B, 0xBCE2633D, 0x161D2C27, 0xAD76419A,
0xDB3BAD4D, 0x6456C8FA, 0x744EE8D2, 0x141E2822, 0x92DB3F76, 0x0C0A181E, 0x486C90B4, 0xB8E46B37, 0x9F5D25E7, 0xBD6E61B2, 0x43EF862A, 0xC4A693F1, 0x39A872E3, 0x31A462F7, 0xD337BD59, 0xF28BFF86,
0xD532B156, 0x8B430DC5, 0x6E59DCEB, 0xDAB7AFC2, 0x018C028F, 0xB16479AC, 0x9CD2236D, 0x49E0923B, 0xD8B4ABC7, 0xACFA4315, 0xF307FD09, 0xCF25856F, 0xCAAF8FEA, 0xF48EF389, 0x47E98E20, 0x10182028,
0x6FD5DE64, 0xF088FB83, 0x4A6F94B1, 0x5C72B896, 0x3824706C, 0x57F1AE08, 0x73C7E652, 0x975135F3, 0xCB238D65, 0xA17C5984, 0xE89CCBBF, 0x3E217C63, 0x96DD377C, 0x61DCC27F, 0x0D861A91, 0x0F851E94,
0xE090DBAB, 0x7C42F8C6, 0x71C4E257, 0xCCAA83E5, 0x90D83B73, 0x06050C0F, 0xF701F503, 0x1C123836, 0xC2A39FFE, 0x6A5FD4E1, 0xAEF94710, 0x69D0D26B, 0x17912EA8, 0x995829E8, 0x3A277469, 0x27B94ED0,
0xD938A948, 0xEB13CD35, 0x2BB356CE, 0x22334455, 0xD2BBBFD6, 0xA9704990, 0x07890E80, 0x33A766F2, 0x2DB65AC1, 0x3C227866, 0x15922AAD, 0xC9208960, 0x874915DB, 0xAAFF4F1A, 0x5078A088, 0xA57A518E,
0x038F068A, 0x59F8B213, 0x0980129B, 0x1A173439, 0x65DACA75, 0xD731B553, 0x84C61351, 0xD0B8BBD3, 0x82C31F5E, 0x29B052CB, 0x5A77B499, 0x1E113C33, 0x7BCBF646, 0xA8FC4B1F, 0x6DD6DA61, 0x2C3A584E
};
__device__ static uint32_t T1up[] = {
0xF432C6C6, 0x976FF8F8, 0xB05EEEEE, 0x8C7AF6F6, 0x17E8FFFF, 0xDC0AD6D6, 0xC816DEDE, 0xFC6D9191, 0xF0906060, 0x05070202, 0xE02ECECE, 0x87D15656, 0x2BCCE7E7, 0xA613B5B5, 0x317C4D4D, 0xB559ECEC,
0xCF408F8F, 0xBCA31F1F, 0xC0498989, 0x9268FAFA, 0x3FD0EFEF, 0x2694B2B2, 0x40CE8E8E, 0x1DE6FBFB, 0x2F6E4141, 0xA91AB3B3, 0x1C435F5F, 0x25604545, 0xDAF92323, 0x02515353, 0xA145E4E4, 0xED769B9B,
0x5D287575, 0x24C5E1E1, 0xE9D43D3D, 0xBEF24C4C, 0xEE826C6C, 0xC3BD7E7E, 0x06F3F5F5, 0xD1528383, 0xE48C6868, 0x07565151, 0x5C8DD1D1, 0x18E1F9F9, 0xAE4CE2E2, 0x953EABAB, 0xF5976262, 0x416B2A2A,
0x141C0808, 0xF6639595, 0xAFE94646, 0xE27F9D9D, 0x78483030, 0xF8CF3737, 0x111B0A0A, 0xC4EB2F2F, 0x1B150E0E, 0x5A7E2424, 0xB6AD1B1B, 0x4798DFDF, 0x6AA7CDCD, 0xBBF54E4E, 0x4C337F7F, 0xBA50EAEA,
0x2D3F1212, 0xB9A41D1D, 0x9CC45858, 0x72463434, 0x77413636, 0xCD11DCDC, 0x299DB4B4, 0x164D5B5B, 0x01A5A4A4, 0xD7A17676, 0xA314B7B7, 0x49347D7D, 0x8DDF5252, 0x429FDDDD, 0x93CD5E5E, 0xA2B11313,
0x04A2A6A6, 0xB801B9B9, 0x00000000, 0x74B5C1C1, 0xA0E04040, 0x21C2E3E3, 0x433A7979, 0x2C9AB6B6, 0xD90DD4D4, 0xCA478D8D, 0x70176767, 0xDDAF7272, 0x79ED9494, 0x67FF9898, 0x2393B0B0, 0xDE5B8585,
0xBD06BBBB, 0x7EBBC5C5, 0x347B4F4F, 0x3AD7EDED, 0x54D28686, 0x62F89A9A, 0xFF996666, 0xA7B61111, 0x4AC08A8A, 0x30D9E9E9, 0x0A0E0404, 0x9866FEFE, 0x0BABA0A0, 0xCCB47878, 0xD5F02525, 0x3E754B4B,
0x0EACA2A2, 0x19445D5D, 0x5BDB8080, 0x85800505, 0xECD33F3F, 0xDFFE2121, 0xD8A87070, 0x0CFDF1F1, 0x7A196363, 0x582F7777, 0x9F30AFAF, 0xA5E74242, 0x50702020, 0x2ECBE5E5, 0x12EFFDFD, 0xB708BFBF,
0xD4558181, 0x3C241818, 0x5F792626, 0x71B2C3C3, 0x3886BEBE, 0xFDC83535, 0x4FC78888, 0x4B652E2E, 0xF96A9393, 0x0D585555, 0x9D61FCFC, 0xC9B37A7A, 0xEF27C8C8, 0x3288BABA, 0x7D4F3232, 0xA442E6E6,
0xFB3BC0C0, 0xB3AA1919, 0x68F69E9E, 0x8122A3A3, 0xAAEE4444, 0x82D65454, 0xE6DD3B3B, 0x9E950B0B, 0x45C98C8C, 0x7BBCC7C7, 0x6E056B6B, 0x446C2828, 0x8B2CA7A7, 0x3D81BCBC, 0x27311616, 0x9A37ADAD,
0x4D96DBDB, 0xFA9E6464, 0xD2A67474, 0x22361414, 0x76E49292, 0x1E120C0C, 0xB4FC4848, 0x378FB8B8, 0xE7789F9F, 0xB20FBDBD, 0x2A694343, 0xF135C4C4, 0xE3DA3939, 0xF7C63131, 0x598AD3D3, 0x8674F2F2,
0x5683D5D5, 0xC54E8B8B, 0xEB856E6E, 0xC218DADA, 0x8F8E0101, 0xAC1DB1B1, 0x6DF19C9C, 0x3B724949, 0xC71FD8D8, 0x15B9ACAC, 0x09FAF3F3, 0x6FA0CFCF, 0xEA20CACA, 0x897DF4F4, 0x20674747, 0x28381010,
0x640B6F6F, 0x8373F0F0, 0xB1FB4A4A, 0x96CA5C5C, 0x6C543838, 0x085F5757, 0x52217373, 0xF3649797, 0x65AECBCB, 0x8425A1A1, 0xBF57E8E8, 0x635D3E3E, 0x7CEA9696, 0x7F1E6161, 0x919C0D0D, 0x949B0F0F,
0xAB4BE0E0, 0xC6BA7C7C, 0x57267171, 0xE529CCCC, 0x73E39090, 0x0F090606, 0x03F4F7F7, 0x362A1C1C, 0xFE3CC2C2, 0xE18B6A6A, 0x10BEAEAE, 0x6B026969, 0xA8BF1717, 0xE8719999, 0x69533A3A, 0xD0F72727,
0x4891D9D9, 0x35DEEBEB, 0xCEE52B2B, 0x55772222, 0xD604D2D2, 0x9039A9A9, 0x80870707, 0xF2C13333, 0xC1EC2D2D, 0x665A3C3C, 0xADB81515, 0x60A9C9C9, 0xDB5C8787, 0x1AB0AAAA, 0x88D85050, 0x8E2BA5A5,
0x8A890303, 0x134A5959, 0x9B920909, 0x39231A1A, 0x75106565, 0x5384D7D7, 0x51D58484, 0xD303D0D0, 0x5EDC8282, 0xCBE22929, 0x99C35A5A, 0x332D1E1E, 0x463D7B7B, 0x1FB7A8A8, 0x610C6D6D, 0x4E622C2C
};
__device__ static uint32_t T1dn[] = {
0xA597F4A5, 0x84EB9784, 0x99C7B099, 0x8DF78C8D, 0x0DE5170D, 0xBDB7DCBD, 0xB1A7C8B1, 0x5439FC54, 0x50C0F050, 0x03040503, 0xA987E0A9, 0x7DAC877D, 0x19D52B19, 0x6271A662, 0xE69A31E6, 0x9AC3B59A,
0x4505CF45, 0x9D3EBC9D, 0x4009C040, 0x87EF9287, 0x15C53F15, 0xEB7F26EB, 0xC90740C9, 0x0BED1D0B, 0xEC822FEC, 0x677DA967, 0xFDBE1CFD, 0xEA8A25EA, 0xBF46DABF, 0xF7A602F7, 0x96D3A196, 0x5B2DED5B,
0xC2EA5DC2, 0x1CD9241C, 0xAE7AE9AE, 0x6A98BE6A, 0x5AD8EE5A, 0x41FCC341, 0x02F10602, 0x4F1DD14F, 0x5CD0E45C, 0xF4A207F4, 0x34B95C34, 0x08E91808, 0x93DFAE93, 0x734D9573, 0x53C4F553, 0x3F54413F,
0x0C10140C, 0x5231F652, 0x658CAF65, 0x5E21E25E, 0x28607828, 0xA16EF8A1, 0x0F14110F, 0xB55EC4B5, 0x091C1B09, 0x36485A36, 0x9B36B69B, 0x3DA5473D, 0x26816A26, 0x699CBB69, 0xCDFE4CCD, 0x9FCFBA9F,
0x1B242D1B, 0x9E3AB99E, 0x74B09C74, 0x2E68722E, 0x2D6C772D, 0xB2A3CDB2, 0xEE7329EE, 0xFBB616FB, 0xF65301F6, 0x4DECD74D, 0x6175A361, 0xCEFA49CE, 0x7BA48D7B, 0x3EA1423E, 0x71BC9371, 0x9726A297,
0xF55704F5, 0x6869B868, 0x00000000, 0x2C99742C, 0x6080A060, 0x1FDD211F, 0xC8F243C8, 0xED772CED, 0xBEB3D9BE, 0x4601CA46, 0xD9CE70D9, 0x4BE4DD4B, 0xDE3379DE, 0xD42B67D4, 0xE87B23E8, 0x4A11DE4A,
0x6B6DBD6B, 0x2A917E2A, 0xE59E34E5, 0x16C13A16, 0xC51754C5, 0xD72F62D7, 0x55CCFF55, 0x9422A794, 0xCF0F4ACF, 0x10C93010, 0x06080A06, 0x81E79881, 0xF05B0BF0, 0x44F0CC44, 0xBA4AD5BA, 0xE3963EE3,
0xF35F0EF3, 0xFEBA19FE, 0xC01B5BC0, 0x8A0A858A, 0xAD7EECAD, 0xBC42DFBC, 0x48E0D848, 0x04F90C04, 0xDFC67ADF, 0xC1EE58C1, 0x75459F75, 0x6384A563, 0x30405030, 0x1AD12E1A, 0x0EE1120E, 0x6D65B76D,
0x4C19D44C, 0x14303C14, 0x354C5F35, 0x2F9D712F, 0xE16738E1, 0xA26AFDA2, 0xCC0B4FCC, 0x395C4B39, 0x573DF957, 0xF2AA0DF2, 0x82E39D82, 0x47F4C947, 0xAC8BEFAC, 0xE76F32E7, 0x2B647D2B, 0x95D7A495,
0xA09BFBA0, 0x9832B398, 0xD12768D1, 0x7F5D817F, 0x6688AA66, 0x7EA8827E, 0xAB76E6AB, 0x83169E83, 0xCA0345CA, 0x29957B29, 0xD3D66ED3, 0x3C50443C, 0x79558B79, 0xE2633DE2, 0x1D2C271D, 0x76419A76,
0x3BAD4D3B, 0x56C8FA56, 0x4EE8D24E, 0x1E28221E, 0xDB3F76DB, 0x0A181E0A, 0x6C90B46C, 0xE46B37E4, 0x5D25E75D, 0x6E61B26E, 0xEF862AEF, 0xA693F1A6, 0xA872E3A8, 0xA462F7A4, 0x37BD5937, 0x8BFF868B,
0x32B15632, 0x430DC543, 0x59DCEB59, 0xB7AFC2B7, 0x8C028F8C, 0x6479AC64, 0xD2236DD2, 0xE0923BE0, 0xB4ABC7B4, 0xFA4315FA, 0x07FD0907, 0x25856F25, 0xAF8FEAAF, 0x8EF3898E, 0xE98E20E9, 0x18202818,
0xD5DE64D5, 0x88FB8388, 0x6F94B16F, 0x72B89672, 0x24706C24, 0xF1AE08F1, 0xC7E652C7, 0x5135F351, 0x238D6523, 0x7C59847C, 0x9CCBBF9C, 0x217C6321, 0xDD377CDD, 0xDCC27FDC, 0x861A9186, 0x851E9485,
0x90DBAB90, 0x42F8C642, 0xC4E257C4, 0xAA83E5AA, 0xD83B73D8, 0x050C0F05, 0x01F50301, 0x12383612, 0xA39FFEA3, 0x5FD4E15F, 0xF94710F9, 0xD0D26BD0, 0x912EA891, 0x5829E858, 0x27746927, 0xB94ED0B9,
0x38A94838, 0x13CD3513, 0xB356CEB3, 0x33445533, 0xBBBFD6BB, 0x70499070, 0x890E8089, 0xA766F2A7, 0xB65AC1B6, 0x22786622, 0x922AAD92, 0x20896020, 0x4915DB49, 0xFF4F1AFF, 0x78A08878, 0x7A518E7A,
0x8F068A8F, 0xF8B213F8, 0x80129B80, 0x17343917, 0xDACA75DA, 0x31B55331, 0xC61351C6, 0xB8BBD3B8, 0xC31F5EC3, 0xB052CBB0, 0x77B49977, 0x113C3311, 0xCBF646CB, 0xFC4B1FFC, 0xD6DA61D6, 0x3A584E3A
};
__device__ static uint32_t T2up[] = {
0x32C6C6A5, 0x6FF8F884, 0x5EEEEE99, 0x7AF6F68D, 0xE8FFFF0D, 0x0AD6D6BD, 0x16DEDEB1, 0x6D919154, 0x90606050, 0x07020203, 0x2ECECEA9, 0xD156567D, 0xCCE7E719, 0x13B5B562, 0x7C4D4DE6, 0x59ECEC9A,
0x408F8F45, 0xA31F1F9D, 0x49898940, 0x68FAFA87, 0xD0EFEF15, 0x94B2B2EB, 0xCE8E8EC9, 0xE6FBFB0B, 0x6E4141EC, 0x1AB3B367, 0x435F5FFD, 0x604545EA, 0xF92323BF, 0x515353F7, 0x45E4E496, 0x769B9B5B,
0x287575C2, 0xC5E1E11C, 0xD43D3DAE, 0xF24C4C6A, 0x826C6C5A, 0xBD7E7E41, 0xF3F5F502, 0x5283834F, 0x8C68685C, 0x565151F4, 0x8DD1D134, 0xE1F9F908, 0x4CE2E293, 0x3EABAB73, 0x97626253, 0x6B2A2A3F,
0x1C08080C, 0x63959552, 0xE9464665, 0x7F9D9D5E, 0x48303028, 0xCF3737A1, 0x1B0A0A0F, 0xEB2F2FB5, 0x150E0E09, 0x7E242436, 0xAD1B1B9B, 0x98DFDF3D, 0xA7CDCD26, 0xF54E4E69, 0x337F7FCD, 0x50EAEA9F,
0x3F12121B, 0xA41D1D9E, 0xC4585874, 0x4634342E, 0x4136362D, 0x11DCDCB2, 0x9DB4B4EE, 0x4D5B5BFB, 0xA5A4A4F6, 0xA176764D, 0x14B7B761, 0x347D7DCE, 0xDF52527B, 0x9FDDDD3E, 0xCD5E5E71, 0xB1131397,
0xA2A6A6F5, 0x01B9B968, 0x00000000, 0xB5C1C12C, 0xE0404060, 0xC2E3E31F, 0x3A7979C8, 0x9AB6B6ED, 0x0DD4D4BE, 0x478D8D46, 0x176767D9, 0xAF72724B, 0xED9494DE, 0xFF9898D4, 0x93B0B0E8, 0x5B85854A,
0x06BBBB6B, 0xBBC5C52A, 0x7B4F4FE5, 0xD7EDED16, 0xD28686C5, 0xF89A9AD7, 0x99666655, 0xB6111194, 0xC08A8ACF, 0xD9E9E910, 0x0E040406, 0x66FEFE81, 0xABA0A0F0, 0xB4787844, 0xF02525BA, 0x754B4BE3,
0xACA2A2F3, 0x445D5DFE, 0xDB8080C0, 0x8005058A, 0xD33F3FAD, 0xFE2121BC, 0xA8707048, 0xFDF1F104, 0x196363DF, 0x2F7777C1, 0x30AFAF75, 0xE7424263, 0x70202030, 0xCBE5E51A, 0xEFFDFD0E, 0x08BFBF6D,
0x5581814C, 0x24181814, 0x79262635, 0xB2C3C32F, 0x86BEBEE1, 0xC83535A2, 0xC78888CC, 0x652E2E39, 0x6A939357, 0x585555F2, 0x61FCFC82, 0xB37A7A47, 0x27C8C8AC, 0x88BABAE7, 0x4F32322B, 0x42E6E695,
0x3BC0C0A0, 0xAA191998, 0xF69E9ED1, 0x22A3A37F, 0xEE444466, 0xD654547E, 0xDD3B3BAB, 0x950B0B83, 0xC98C8CCA, 0xBCC7C729, 0x056B6BD3, 0x6C28283C, 0x2CA7A779, 0x81BCBCE2, 0x3116161D, 0x37ADAD76,
0x96DBDB3B, 0x9E646456, 0xA674744E, 0x3614141E, 0xE49292DB, 0x120C0C0A, 0xFC48486C, 0x8FB8B8E4, 0x789F9F5D, 0x0FBDBD6E, 0x694343EF, 0x35C4C4A6, 0xDA3939A8, 0xC63131A4, 0x8AD3D337, 0x74F2F28B,
0x83D5D532, 0x4E8B8B43, 0x856E6E59, 0x18DADAB7, 0x8E01018C, 0x1DB1B164, 0xF19C9CD2, 0x724949E0, 0x1FD8D8B4, 0xB9ACACFA, 0xFAF3F307, 0xA0CFCF25, 0x20CACAAF, 0x7DF4F48E, 0x674747E9, 0x38101018,
0x0B6F6FD5, 0x73F0F088, 0xFB4A4A6F, 0xCA5C5C72, 0x54383824, 0x5F5757F1, 0x217373C7, 0x64979751, 0xAECBCB23, 0x25A1A17C, 0x57E8E89C, 0x5D3E3E21, 0xEA9696DD, 0x1E6161DC, 0x9C0D0D86, 0x9B0F0F85,
0x4BE0E090, 0xBA7C7C42, 0x267171C4, 0x29CCCCAA, 0xE39090D8, 0x09060605, 0xF4F7F701, 0x2A1C1C12, 0x3CC2C2A3, 0x8B6A6A5F, 0xBEAEAEF9, 0x026969D0, 0xBF171791, 0x71999958, 0x533A3A27, 0xF72727B9,
0x91D9D938, 0xDEEBEB13, 0xE52B2BB3, 0x77222233, 0x04D2D2BB, 0x39A9A970, 0x87070789, 0xC13333A7, 0xEC2D2DB6, 0x5A3C3C22, 0xB8151592, 0xA9C9C920, 0x5C878749, 0xB0AAAAFF, 0xD8505078, 0x2BA5A57A,
0x8903038F, 0x4A5959F8, 0x92090980, 0x231A1A17, 0x106565DA, 0x84D7D731, 0xD58484C6, 0x03D0D0B8, 0xDC8282C3, 0xE22929B0, 0xC35A5A77, 0x2D1E1E11, 0x3D7B7BCB, 0xB7A8A8FC, 0x0C6D6DD6, 0x622C2C3A
};
__device__ static uint32_t T2dn[] = {
0x97F4A5F4, 0xEB978497, 0xC7B099B0, 0xF78C8D8C, 0xE5170D17, 0xB7DCBDDC, 0xA7C8B1C8, 0x39FC54FC, 0xC0F050F0, 0x04050305, 0x87E0A9E0, 0xAC877D87, 0xD52B192B, 0x71A662A6, 0x9A31E631, 0xC3B59AB5,
0x05CF45CF, 0x3EBC9DBC, 0x09C040C0, 0xEF928792, 0xC53F153F, 0x7F26EB26, 0x0740C940, 0xED1D0B1D, 0x822FEC2F, 0x7DA967A9, 0xBE1CFD1C, 0x8A25EA25, 0x46DABFDA, 0xA602F702, 0xD3A196A1, 0x2DED5BED,
0xEA5DC25D, 0xD9241C24, 0x7AE9AEE9, 0x98BE6ABE, 0xD8EE5AEE, 0xFCC341C3, 0xF1060206, 0x1DD14FD1, 0xD0E45CE4, 0xA207F407, 0xB95C345C, 0xE9180818, 0xDFAE93AE, 0x4D957395, 0xC4F553F5, 0x54413F41,
0x10140C14, 0x31F652F6, 0x8CAF65AF, 0x21E25EE2, 0x60782878, 0x6EF8A1F8, 0x14110F11, 0x5EC4B5C4, 0x1C1B091B, 0x485A365A, 0x36B69BB6, 0xA5473D47, 0x816A266A, 0x9CBB69BB, 0xFE4CCD4C, 0xCFBA9FBA,
0x242D1B2D, 0x3AB99EB9, 0xB09C749C, 0x68722E72, 0x6C772D77, 0xA3CDB2CD, 0x7329EE29, 0xB616FB16, 0x5301F601, 0xECD74DD7, 0x75A361A3, 0xFA49CE49, 0xA48D7B8D, 0xA1423E42, 0xBC937193, 0x26A297A2,
0x5704F504, 0x69B868B8, 0x00000000, 0x99742C74, 0x80A060A0, 0xDD211F21, 0xF243C843, 0x772CED2C, 0xB3D9BED9, 0x01CA46CA, 0xCE70D970, 0xE4DD4BDD, 0x3379DE79, 0x2B67D467, 0x7B23E823, 0x11DE4ADE,
0x6DBD6BBD, 0x917E2A7E, 0x9E34E534, 0xC13A163A, 0x1754C554, 0x2F62D762, 0xCCFF55FF, 0x22A794A7, 0x0F4ACF4A, 0xC9301030, 0x080A060A, 0xE7988198, 0x5B0BF00B, 0xF0CC44CC, 0x4AD5BAD5, 0x963EE33E,
0x5F0EF30E, 0xBA19FE19, 0x1B5BC05B, 0x0A858A85, 0x7EECADEC, 0x42DFBCDF, 0xE0D848D8, 0xF90C040C, 0xC67ADF7A, 0xEE58C158, 0x459F759F, 0x84A563A5, 0x40503050, 0xD12E1A2E, 0xE1120E12, 0x65B76DB7,
0x19D44CD4, 0x303C143C, 0x4C5F355F, 0x9D712F71, 0x6738E138, 0x6AFDA2FD, 0x0B4FCC4F, 0x5C4B394B, 0x3DF957F9, 0xAA0DF20D, 0xE39D829D, 0xF4C947C9, 0x8BEFACEF, 0x6F32E732, 0x647D2B7D, 0xD7A495A4,
0x9BFBA0FB, 0x32B398B3, 0x2768D168, 0x5D817F81, 0x88AA66AA, 0xA8827E82, 0x76E6ABE6, 0x169E839E, 0x0345CA45, 0x957B297B, 0xD66ED36E, 0x50443C44, 0x558B798B, 0x633DE23D, 0x2C271D27, 0x419A769A,
0xAD4D3B4D, 0xC8FA56FA, 0xE8D24ED2, 0x28221E22, 0x3F76DB76, 0x181E0A1E, 0x90B46CB4, 0x6B37E437, 0x25E75DE7, 0x61B26EB2, 0x862AEF2A, 0x93F1A6F1, 0x72E3A8E3, 0x62F7A4F7, 0xBD593759, 0xFF868B86,
0xB1563256, 0x0DC543C5, 0xDCEB59EB, 0xAFC2B7C2, 0x028F8C8F, 0x79AC64AC, 0x236DD26D, 0x923BE03B, 0xABC7B4C7, 0x4315FA15, 0xFD090709, 0x856F256F, 0x8FEAAFEA, 0xF3898E89, 0x8E20E920, 0x20281828,
0xDE64D564, 0xFB838883, 0x94B16FB1, 0xB8967296, 0x706C246C, 0xAE08F108, 0xE652C752, 0x35F351F3, 0x8D652365, 0x59847C84, 0xCBBF9CBF, 0x7C632163, 0x377CDD7C, 0xC27FDC7F, 0x1A918691, 0x1E948594,
0xDBAB90AB, 0xF8C642C6, 0xE257C457, 0x83E5AAE5, 0x3B73D873, 0x0C0F050F, 0xF5030103, 0x38361236, 0x9FFEA3FE, 0xD4E15FE1, 0x4710F910, 0xD26BD06B, 0x2EA891A8, 0x29E858E8, 0x74692769, 0x4ED0B9D0,
0xA9483848, 0xCD351335, 0x56CEB3CE, 0x44553355, 0xBFD6BBD6, 0x49907090, 0x0E808980, 0x66F2A7F2, 0x5AC1B6C1, 0x78662266, 0x2AAD92AD, 0x89602060, 0x15DB49DB, 0x4F1AFF1A, 0xA0887888, 0x518E7A8E,
0x068A8F8A, 0xB213F813, 0x129B809B, 0x34391739, 0xCA75DA75, 0xB5533153, 0x1351C651, 0xBBD3B8D3, 0x1F5EC35E, 0x52CBB0CB, 0xB4997799, 0x3C331133, 0xF646CB46, 0x4B1FFC1F, 0xDA61D661, 0x584E3A4E
};
__device__ static uint32_t T3up[] = {
0xC6C6A597, 0xF8F884EB, 0xEEEE99C7, 0xF6F68DF7, 0xFFFF0DE5, 0xD6D6BDB7, 0xDEDEB1A7, 0x91915439, 0x606050C0, 0x02020304, 0xCECEA987, 0x56567DAC, 0xE7E719D5, 0xB5B56271, 0x4D4DE69A, 0xECEC9AC3,
0x8F8F4505, 0x1F1F9D3E, 0x89894009, 0xFAFA87EF, 0xEFEF15C5, 0xB2B2EB7F, 0x8E8EC907, 0xFBFB0BED, 0x4141EC82, 0xB3B3677D, 0x5F5FFDBE, 0x4545EA8A, 0x2323BF46, 0x5353F7A6, 0xE4E496D3, 0x9B9B5B2D,
0x7575C2EA, 0xE1E11CD9, 0x3D3DAE7A, 0x4C4C6A98, 0x6C6C5AD8, 0x7E7E41FC, 0xF5F502F1, 0x83834F1D, 0x68685CD0, 0x5151F4A2, 0xD1D134B9, 0xF9F908E9, 0xE2E293DF, 0xABAB734D, 0x626253C4, 0x2A2A3F54,
0x08080C10, 0x95955231, 0x4646658C, 0x9D9D5E21, 0x30302860, 0x3737A16E, 0x0A0A0F14, 0x2F2FB55E, 0x0E0E091C, 0x24243648, 0x1B1B9B36, 0xDFDF3DA5, 0xCDCD2681, 0x4E4E699C, 0x7F7FCDFE, 0xEAEA9FCF,
0x12121B24, 0x1D1D9E3A, 0x585874B0, 0x34342E68, 0x36362D6C, 0xDCDCB2A3, 0xB4B4EE73, 0x5B5BFBB6, 0xA4A4F653, 0x76764DEC, 0xB7B76175, 0x7D7DCEFA, 0x52527BA4, 0xDDDD3EA1, 0x5E5E71BC, 0x13139726,
0xA6A6F557, 0xB9B96869, 0x00000000, 0xC1C12C99, 0x40406080, 0xE3E31FDD, 0x7979C8F2, 0xB6B6ED77, 0xD4D4BEB3, 0x8D8D4601, 0x6767D9CE, 0x72724BE4, 0x9494DE33, 0x9898D42B, 0xB0B0E87B, 0x85854A11,
0xBBBB6B6D, 0xC5C52A91, 0x4F4FE59E, 0xEDED16C1, 0x8686C517, 0x9A9AD72F, 0x666655CC, 0x11119422, 0x8A8ACF0F, 0xE9E910C9, 0x04040608, 0xFEFE81E7, 0xA0A0F05B, 0x787844F0, 0x2525BA4A, 0x4B4BE396,
0xA2A2F35F, 0x5D5DFEBA, 0x8080C01B, 0x05058A0A, 0x3F3FAD7E, 0x2121BC42, 0x707048E0, 0xF1F104F9, 0x6363DFC6, 0x7777C1EE, 0xAFAF7545, 0x42426384, 0x20203040, 0xE5E51AD1, 0xFDFD0EE1, 0xBFBF6D65,
0x81814C19, 0x18181430, 0x2626354C, 0xC3C32F9D, 0xBEBEE167, 0x3535A26A, 0x8888CC0B, 0x2E2E395C, 0x9393573D, 0x5555F2AA, 0xFCFC82E3, 0x7A7A47F4, 0xC8C8AC8B, 0xBABAE76F, 0x32322B64, 0xE6E695D7,
0xC0C0A09B, 0x19199832, 0x9E9ED127, 0xA3A37F5D, 0x44446688, 0x54547EA8, 0x3B3BAB76, 0x0B0B8316, 0x8C8CCA03, 0xC7C72995, 0x6B6BD3D6, 0x28283C50, 0xA7A77955, 0xBCBCE263, 0x16161D2C, 0xADAD7641,
0xDBDB3BAD, 0x646456C8, 0x74744EE8, 0x14141E28, 0x9292DB3F, 0x0C0C0A18, 0x48486C90, 0xB8B8E46B, 0x9F9F5D25, 0xBDBD6E61, 0x4343EF86, 0xC4C4A693, 0x3939A872, 0x3131A462, 0xD3D337BD, 0xF2F28BFF,
0xD5D532B1, 0x8B8B430D, 0x6E6E59DC, 0xDADAB7AF, 0x01018C02, 0xB1B16479, 0x9C9CD223, 0x4949E092, 0xD8D8B4AB, 0xACACFA43, 0xF3F307FD, 0xCFCF2585, 0xCACAAF8F, 0xF4F48EF3, 0x4747E98E, 0x10101820,
0x6F6FD5DE, 0xF0F088FB, 0x4A4A6F94, 0x5C5C72B8, 0x38382470, 0x5757F1AE, 0x7373C7E6, 0x97975135, 0xCBCB238D, 0xA1A17C59, 0xE8E89CCB, 0x3E3E217C, 0x9696DD37, 0x6161DCC2, 0x0D0D861A, 0x0F0F851E,
0xE0E090DB, 0x7C7C42F8, 0x7171C4E2, 0xCCCCAA83, 0x9090D83B, 0x0606050C, 0xF7F701F5, 0x1C1C1238, 0xC2C2A39F, 0x6A6A5FD4, 0xAEAEF947, 0x6969D0D2, 0x1717912E, 0x99995829, 0x3A3A2774, 0x2727B94E,
0xD9D938A9, 0xEBEB13CD, 0x2B2BB356, 0x22223344, 0xD2D2BBBF, 0xA9A97049, 0x0707890E, 0x3333A766, 0x2D2DB65A, 0x3C3C2278, 0x1515922A, 0xC9C92089, 0x87874915, 0xAAAAFF4F, 0x505078A0, 0xA5A57A51,
0x03038F06, 0x5959F8B2, 0x09098012, 0x1A1A1734, 0x6565DACA, 0xD7D731B5, 0x8484C613, 0xD0D0B8BB, 0x8282C31F, 0x2929B052, 0x5A5A77B4, 0x1E1E113C, 0x7B7BCBF6, 0xA8A8FC4B, 0x6D6DD6DA, 0x2C2C3A58
};
__device__ static uint32_t T3dn[] = {
0xF4A5F432, 0x9784976F, 0xB099B05E, 0x8C8D8C7A, 0x170D17E8, 0xDCBDDC0A, 0xC8B1C816, 0xFC54FC6D, 0xF050F090, 0x05030507, 0xE0A9E02E, 0x877D87D1, 0x2B192BCC, 0xA662A613, 0x31E6317C, 0xB59AB559,
0xCF45CF40, 0xBC9DBCA3, 0xC040C049, 0x92879268, 0x3F153FD0, 0x26EB2694, 0x40C940CE, 0x1D0B1DE6, 0x2FEC2F6E, 0xA967A91A, 0x1CFD1C43, 0x25EA2560, 0xDABFDAF9, 0x02F70251, 0xA196A145, 0xED5BED76,
0x5DC25D28, 0x241C24C5, 0xE9AEE9D4, 0xBE6ABEF2, 0xEE5AEE82, 0xC341C3BD, 0x060206F3, 0xD14FD152, 0xE45CE48C, 0x07F40756, 0x5C345C8D, 0x180818E1, 0xAE93AE4C, 0x9573953E, 0xF553F597, 0x413F416B,
0x140C141C, 0xF652F663, 0xAF65AFE9, 0xE25EE27F, 0x78287848, 0xF8A1F8CF, 0x110F111B, 0xC4B5C4EB, 0x1B091B15, 0x5A365A7E, 0xB69BB6AD, 0x473D4798, 0x6A266AA7, 0xBB69BBF5, 0x4CCD4C33, 0xBA9FBA50,
0x2D1B2D3F, 0xB99EB9A4, 0x9C749CC4, 0x722E7246, 0x772D7741, 0xCDB2CD11, 0x29EE299D, 0x16FB164D, 0x01F601A5, 0xD74DD7A1, 0xA361A314, 0x49CE4934, 0x8D7B8DDF, 0x423E429F, 0x937193CD, 0xA297A2B1,
0x04F504A2, 0xB868B801, 0x00000000, 0x742C74B5, 0xA060A0E0, 0x211F21C2, 0x43C8433A, 0x2CED2C9A, 0xD9BED90D, 0xCA46CA47, 0x70D97017, 0xDD4BDDAF, 0x79DE79ED, 0x67D467FF, 0x23E82393, 0xDE4ADE5B,
0xBD6BBD06, 0x7E2A7EBB, 0x34E5347B, 0x3A163AD7, 0x54C554D2, 0x62D762F8, 0xFF55FF99, 0xA794A7B6, 0x4ACF4AC0, 0x301030D9, 0x0A060A0E, 0x98819866, 0x0BF00BAB, 0xCC44CCB4, 0xD5BAD5F0, 0x3EE33E75,
0x0EF30EAC, 0x19FE1944, 0x5BC05BDB, 0x858A8580, 0xECADECD3, 0xDFBCDFFE, 0xD848D8A8, 0x0C040CFD, 0x7ADF7A19, 0x58C1582F, 0x9F759F30, 0xA563A5E7, 0x50305070, 0x2E1A2ECB, 0x120E12EF, 0xB76DB708,
0xD44CD455, 0x3C143C24, 0x5F355F79, 0x712F71B2, 0x38E13886, 0xFDA2FDC8, 0x4FCC4FC7, 0x4B394B65, 0xF957F96A, 0x0DF20D58, 0x9D829D61, 0xC947C9B3, 0xEFACEF27, 0x32E73288, 0x7D2B7D4F, 0xA495A442,
0xFBA0FB3B, 0xB398B3AA, 0x68D168F6, 0x817F8122, 0xAA66AAEE, 0x827E82D6, 0xE6ABE6DD, 0x9E839E95, 0x45CA45C9, 0x7B297BBC, 0x6ED36E05, 0x443C446C, 0x8B798B2C, 0x3DE23D81, 0x271D2731, 0x9A769A37,
0x4D3B4D96, 0xFA56FA9E, 0xD24ED2A6, 0x221E2236, 0x76DB76E4, 0x1E0A1E12, 0xB46CB4FC, 0x37E4378F, 0xE75DE778, 0xB26EB20F, 0x2AEF2A69, 0xF1A6F135, 0xE3A8E3DA, 0xF7A4F7C6, 0x5937598A, 0x868B8674,
0x56325683, 0xC543C54E, 0xEB59EB85, 0xC2B7C218, 0x8F8C8F8E, 0xAC64AC1D, 0x6DD26DF1, 0x3BE03B72, 0xC7B4C71F, 0x15FA15B9, 0x090709FA, 0x6F256FA0, 0xEAAFEA20, 0x898E897D, 0x20E92067, 0x28182838,
0x64D5640B, 0x83888373, 0xB16FB1FB, 0x967296CA, 0x6C246C54, 0x08F1085F, 0x52C75221, 0xF351F364, 0x652365AE, 0x847C8425, 0xBF9CBF57, 0x6321635D, 0x7CDD7CEA, 0x7FDC7F1E, 0x9186919C, 0x9485949B,
0xAB90AB4B, 0xC642C6BA, 0x57C45726, 0xE5AAE529, 0x73D873E3, 0x0F050F09, 0x030103F4, 0x3612362A, 0xFEA3FE3C, 0xE15FE18B, 0x10F910BE, 0x6BD06B02, 0xA891A8BF, 0xE858E871, 0x69276953, 0xD0B9D0F7,
0x48384891, 0x351335DE, 0xCEB3CEE5, 0x55335577, 0xD6BBD604, 0x90709039, 0x80898087, 0xF2A7F2C1, 0xC1B6C1EC, 0x6622665A, 0xAD92ADB8, 0x602060A9, 0xDB49DB5C, 0x1AFF1AB0, 0x887888D8, 0x8E7A8E2B,
0x8A8F8A89, 0x13F8134A, 0x9B809B92, 0x39173923, 0x75DA7510, 0x53315384, 0x51C651D5, 0xD3B8D303, 0x5EC35EDC, 0xCBB0CBE2, 0x997799C3, 0x3311332D, 0x46CB463D, 0x1FFC1FB7, 0x61D6610C, 0x4E3A4E62
};
#define PC32up(j, r) ((uint32_t)((j) + (r)))
#define PC32dn(j, r) 0
#define QC32up(j, r) 0xFFFFFFFF
#define QC32dn(j, r) (((uint32_t)(r) << 24) ^ (~((uint32_t)(j) << 24)))
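// Round constants: P XORs PC32up(j, r) into the even state words only (PC32dn is 0),
// while Q flips the even words with 0xFFFFFFFF and XORs (r << 24) ^ ~((j) << 24) into
// the odd words -- the 32-bit word split of the Groestl round constants.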
#define B32_0(x) __byte_perm(x, 0, 0x4440) //((x) & 0xFF)
#define B32_1(x) __byte_perm(x, 0, 0x4441) //(((x) >> 8) & 0xFF)
#define B32_2(x) __byte_perm(x, 0, 0x4442) //(((x) >> 16) & 0xFF)
#define B32_3(x) __byte_perm(x, 0, 0x4443) //((x) >> 24)
//#define B32_0(x) ((x) & 0xFF)
//#define B32_1(x) bfe(x, 8, 8)//(((x) >> 8) & 0xFF)
//#define B32_2(x) bfe(x,16, 8)//(((x) >> 16) & 0xFF)
//#define B32_3(x) bfe(x,24, 8)//((x) >> 24)
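// e.g. B32_1(0xAABBCCDD) == 0xCC: selector 0x444n keeps byte n of x (byte 0 = LSB)
// and zero-fills the other three result bytes.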
#define T0up(x) __ldg(&T0up[x])
#define T0dn(x) mixtabs[1][x]
#define T1up(x) mixtabs[2][x]
#define T1dn(x) mixtabs[3][x]
#define T2up(x) mixtabs[4][x]
#define T2dn(x) __ldg(&T2dn[x])
#define T3up(x) mixtabs[6][x]
#define T3dn(x) __ldg(&T3dn[x])
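// RSTT computes one pair of round-output columns t[d0], t[d1] with eight T-table
// lookups per column (SubBytes, ShiftBytes and MixBytes folded into the tables);
// some tables are read through __ldg() and the rest from shared memory to spread
// the load across memory paths.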
#define RSTT(d0, d1, a, b0, b1, b2, b3, b4, b5, b6, b7) do { \
t[d0] = T0up(B32_0(a[b0]))^ T0dn(B32_0(a[b4]));\
t[d0]^= T1up(B32_1(a[b1]));\
t[d0]^= T1dn(B32_1(a[b5]));\
t[d0]^= T2up(B32_2(a[b2]));\
t[d0]^= T3up(B32_3(a[b3]));\
t[d0]^= T2dn(B32_2(a[b6]));\
t[d0]^= T3dn(B32_3(a[b7]));\
t[d1] = T0dn(B32_0(a[b0])) ^ T1dn(B32_1(a[b1]));\
t[d1]^= T2dn(B32_2(a[b2]));\
t[d1]^= T0up(B32_0(a[b4]));\
t[d1]^= T1up(B32_1(a[b5]));\
t[d1]^= T2up(B32_2(a[b6]));\
t[d1]^= T3up(B32_3(a[b7]));\
t[d1]^= T3dn(B32_3(a[b3]));\
} while (0)
// t[d0] = T0up(B32_0(a[b0])) ^ xor3x(T1up(B32_1(a[b1])), T2up(B32_2(a[b2])), xor3x(T3up(B32_3(a[b3])), T0dn(B32_0(a[b4])), xor3x(T1dn(B32_1(a[b5])), T2dn(B32_2(a[b6])), T3dn(B32_3(a[b7])))));
// t[d1] = T0dn(B32_0(a[b0])) ^ xor3x(T1dn(B32_1(a[b1])), T2dn(B32_2(a[b2])), xor3x(T3dn(B32_3(a[b3])), T0up(B32_0(a[b4])), xor3x(T1up(B32_1(a[b5])), T2up(B32_2(a[b6])), T3up(B32_3(a[b7])))));
__device__ __forceinline__
void groestl256_perm_Q(uint32_t thread, uint32_t *a,const uint32_t mixtabs[8][256])
{
for (int r = 0; r<10; r++)
{
uint32_t t[16];
a[0x0] ^= QC32up(0x00, r);
a[0x1] ^= QC32dn(0x00, r);
a[0x2] ^= QC32up(0x10, r);
a[0x3] ^= QC32dn(0x10, r);
a[0x4] ^= QC32up(0x20, r);
a[0x5] ^= QC32dn(0x20, r);
a[0x6] ^= QC32up(0x30, r);
a[0x7] ^= QC32dn(0x30, r);
a[0x8] ^= QC32up(0x40, r);
a[0x9] ^= QC32dn(0x40, r);
a[0xA] ^= QC32up(0x50, r);
a[0xB] ^= QC32dn(0x50, r);
a[0xC] ^= QC32up(0x60, r);
a[0xD] ^= QC32dn(0x60, r);
a[0xE] ^= QC32up(0x70, r);
a[0xF] ^= QC32dn(0x70, r);
RSTT(0x0, 0x1, a, 0x2, 0x6, 0xA, 0xE, 0x1, 0x5, 0x9, 0xD);
RSTT(0x2, 0x3, a, 0x4, 0x8, 0xC, 0x0, 0x3, 0x7, 0xB, 0xF);
RSTT(0x4, 0x5, a, 0x6, 0xA, 0xE, 0x2, 0x5, 0x9, 0xD, 0x1);
RSTT(0x6, 0x7, a, 0x8, 0xC, 0x0, 0x4, 0x7, 0xB, 0xF, 0x3);
RSTT(0x8, 0x9, a, 0xA, 0xE, 0x2, 0x6, 0x9, 0xD, 0x1, 0x5);
RSTT(0xA, 0xB, a, 0xC, 0x0, 0x4, 0x8, 0xB, 0xF, 0x3, 0x7);
RSTT(0xC, 0xD, a, 0xE, 0x2, 0x6, 0xA, 0xD, 0x1, 0x5, 0x9);
RSTT(0xE, 0xF, a, 0x0, 0x4, 0x8, 0xC, 0xF, 0x3, 0x7, 0xB);
#pragma unroll
for (int k = 0; k<16; k++)
a[k] = t[k];
}
}
__global__ __launch_bounds__(TPB,2)
void groestl256_gpu_hash_32(uint32_t threads, uint32_t startNounce, uint2 *d_Hash, uint32_t *resNonces)
{
__shared__ uint32_t mixtabs[8][256];
mixtabs[0][threadIdx.x] = __ldg(&T0up[threadIdx.x]);
mixtabs[1][threadIdx.x] = __ldg(&T0dn[threadIdx.x]);
mixtabs[2][threadIdx.x] = __ldg(&T1up[threadIdx.x]);
mixtabs[3][threadIdx.x] = __ldg(&T1dn[threadIdx.x]);
mixtabs[4][threadIdx.x] = __ldg(&T2up[threadIdx.x]);
mixtabs[5][threadIdx.x] = __ldg(&T2dn[threadIdx.x]);
mixtabs[6][threadIdx.x] = __ldg(&T3up[threadIdx.x]);
mixtabs[7][threadIdx.x] = __ldg(&T3dn[threadIdx.x]);
const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
const uint32_t nonce = startNounce + thread;
uint32_t t[16];
// GROESTL
uint32_t message[16] = { 0 };
uint32_t state[16];
#pragma unroll
for (int k = 0; k<4; k++)
*(uint2*)&message[2*k]=__ldg(&d_Hash[k*threads+thread]);
__syncthreads();
message[8] = 0x80;
message[15] = 0x01000000;
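// Groestl padding for a single 64-byte block holding the 32-byte input:
// a 0x80 byte right after the message and a big-endian block counter of 1
// in the final bytes of the block.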
#pragma unroll 16
for (int u = 0; u<16; u++)
state[u] = message[u];
state[15] ^= 0x10000;
// Perm
// groestl256_perm_P(thread, state, mixtabs);
for (int r = 0; r<10; r++){
state[0x0] ^= PC32up(0x00, r);
state[0x2] ^= PC32up(0x10, r);
state[0x4] ^= PC32up(0x20, r);
state[0x6] ^= PC32up(0x30, r);
state[0x8] ^= PC32up(0x40, r);
state[0xA] ^= PC32up(0x50, r);
state[0xC] ^= PC32up(0x60, r);
state[0xE] ^= PC32up(0x70, r);
RSTT(0x0, 0x1, state, 0x0, 0x2, 0x4, 0x6, 0x9, 0xB, 0xD, 0xF);
RSTT(0x2, 0x3, state, 0x2, 0x4, 0x6, 0x8, 0xB, 0xD, 0xF, 0x1);
RSTT(0x4, 0x5, state, 0x4, 0x6, 0x8, 0xA, 0xD, 0xF, 0x1, 0x3);
RSTT(0x6, 0x7, state, 0x6, 0x8, 0xA, 0xC, 0xF, 0x1, 0x3, 0x5);
RSTT(0x8, 0x9, state, 0x8, 0xA, 0xC, 0xE, 0x1, 0x3, 0x5, 0x7);
RSTT(0xA, 0xB, state, 0xA, 0xC, 0xE, 0x0, 0x3, 0x5, 0x7, 0x9);
RSTT(0xC, 0xD, state, 0xC, 0xE, 0x0, 0x2, 0x5, 0x7, 0x9, 0xB);
RSTT(0xE, 0xF, state, 0xE, 0x0, 0x2, 0x4, 0x7, 0x9, 0xB, 0xD);
#pragma unroll 16
for (int k = 0; k<16; k++)
state[k] = t[k];
}
state[15] ^= 0x10000;
groestl256_perm_Q(thread, message, mixtabs);
#pragma unroll 16
for (int u = 0; u<16; u++) state[u] ^= message[u];
// #pragma unroll 16
// for (int u = 0; u<16; u++) message[u] = state[u];
uint32_t state15 = state[15];
// groestl256_perm_P(thread, state, mixtabs);
for (int r = 0; r<10; r++){
state[0x0] ^= PC32up(0x00, r);
state[0x2] ^= PC32up(0x10, r);
state[0x4] ^= PC32up(0x20, r);
state[0x6] ^= PC32up(0x30, r);
state[0x8] ^= PC32up(0x40, r);
state[0xA] ^= PC32up(0x50, r);
state[0xC] ^= PC32up(0x60, r);
state[0xE] ^= PC32up(0x70, r);
RSTT(0x0, 0x1, state, 0x0, 0x2, 0x4, 0x6, 0x9, 0xB, 0xD, 0xF);
RSTT(0x2, 0x3, state, 0x2, 0x4, 0x6, 0x8, 0xB, 0xD, 0xF, 0x1);
RSTT(0x4, 0x5, state, 0x4, 0x6, 0x8, 0xA, 0xD, 0xF, 0x1, 0x3);
RSTT(0x6, 0x7, state, 0x6, 0x8, 0xA, 0xC, 0xF, 0x1, 0x3, 0x5);
RSTT(0x8, 0x9, state, 0x8, 0xA, 0xC, 0xE, 0x1, 0x3, 0x5, 0x7);
RSTT(0xA, 0xB, state, 0xA, 0xC, 0xE, 0x0, 0x3, 0x5, 0x7, 0x9);
RSTT(0xC, 0xD, state, 0xC, 0xE, 0x0, 0x2, 0x5, 0x7, 0x9, 0xB);
RSTT(0xE, 0xF, state, 0xE, 0x0, 0x2, 0x4, 0x7, 0x9, 0xB, 0xD);
#pragma unroll 16
for (int k = 0; k<16; k++)
state[k] = t[k];
}
if ((state15^state[15]) <= pTarget[7]) {
uint32_t tmp = atomicExch(&resNonces[0], nonce);
if (tmp != UINT32_MAX)
resNonces[1] = tmp;
}
}
__host__
void groestl256_cpu_init(int thr_id, uint32_t threads)
{
cudaMalloc(&d_GNonces[thr_id], 2*sizeof(uint32_t));
cudaMallocHost(&h_GNonces[thr_id], 2*sizeof(uint32_t));
}
__host__
void groestl256_cpu_free(int thr_id)
{
cudaFree(d_GNonces[thr_id]);
cudaFreeHost(h_GNonces[thr_id]);
}
__host__
void groestl256_cpu_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint2 *d_Hash, uint32_t *resultnonces)
{
// compute how many thread blocks we need
dim3 grid((threads + TPB-1)/TPB);
dim3 block(TPB);
size_t shared_size = 8 * 256 * sizeof(uint32_t);
groestl256_gpu_hash_32<<<grid, block, shared_size>>>(threads, startNounce, d_Hash, d_GNonces[thr_id]);
// cudaThreadSynchronize();
cudaMemcpy(resultnonces, d_GNonces[thr_id], 2*sizeof(uint32_t), cudaMemcpyDeviceToHost);
}
__host__
void groestl256_set_output(int thr_id)
{
cudaMemset(d_GNonces[thr_id], 0xff, 2*sizeof(uint32_t));
}
__host__
void groestl256_setTarget(const void *pTargetIn)
{
cudaMemcpyToSymbol(pTarget, pTargetIn, 32, 0, cudaMemcpyHostToDevice);
}
// CGLS Conjugate Gradient Least Squares
// Attempts to solve the least squares problem
//
// min. ||Ax - b||_2^2 + s ||x||_2^2
//
// using the Conjugate Gradient for Least Squares method. This is more stable
// than applying CG to the normal equations. Supports both generic operators
// for computing Ax and A^Tx as well as a sparse matrix version.
//
// ------------------------------ GENERIC ------------------------------------
//
// Template Arguments:
// T - Data type (float or double).
//
// F - Generic GEMV-like functor type with signature
// int gemv(char op, T alpha, const T *x, T beta, T *y). Upon
// exit, y should take on the value y := alpha*op(A)x + beta*y.
// If successful the functor must return 0, otherwise a non-zero
// value should be returned.
//
// Function Arguments:
// A - Operator that computes Ax and A^Tx.
//
// (m, n) - Matrix dimensions of A.
//
// b - Pointer to right-hand-side vector.
//
// x - Pointer to solution. This vector will also be used as an
// initial guess, so it must be initialized (e.g. to 0).
//
// shift - Regularization parameter s. Solves (A'*A + shift*I)*x = A'*b.
//
// tol - Specifies tolerance (recommended 1e-6).
//
// maxit - Maximum number of iterations (recommended > 100).
//
// quiet - Disable printing to console.
//
// ------------------------------ SPARSE --------------------------------------
//
// Template Arguments:
// T - Data type (float or double).
//
// O - Sparse ordering (cgls::CSC or cgls::CSR).
//
// Function Arguments:
// val - Array of matrix values. The array should be of length nnz.
//
// ptr - Column pointer if (O is CSC) or row pointer if (O is CSR).
// The array should be of length n+1 if O is CSC, or m+1 if O is CSR.
//
// ind - Row indices if (O is CSC) or column indices if (O is CSR).
// The array should be of length nnz.
//
// (m, n) - Matrix dimensions of A.
//
// nnz - Number of non-zeros in A.
//
// b - Pointer to right-hand-side vector.
//
// x - Pointer to solution. This vector will also be used as an
// initial guess, so it must be initialized (e.g. to 0).
//
// shift - Regularization parameter s. Solves (A'*A + shift*I)*x = A'*b.
//
// tol - Specifies tolerance (recommended 1e-6).
//
// maxit - Maximum number of iterations (recommended > 100).
//
// quiet - Disable printing to console.
//
// ----------------------------------------------------------------------------
//
// Returns:
// 0 : CGLS converged to the desired tolerance tol within maxit iterations.
// 1 : The vector b had norm less than eps, solution likely x = 0.
// 2 : CGLS iterated maxit times but did not converge.
// 3 : Matrix (A'*A + shift*I) seems to be singular or indefinite.
// 4 : Likely unstable: (A'*A + shift*I) indefinite and norm(x) decreased.
// 5 : Error in applying operator A.
// 6 : Error in applying operator A^T.
//
// Reference:
// http://web.stanford.edu/group/SOL/software/cgls/
//
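// Example (sketch only, not part of the library): solving a CSR system in
// double precision, assuming device arrays d_val, d_ptr, d_ind, d_b and d_x
// have already been allocated with cudaMalloc and filled (d_x initialized to 0
// as the starting guess):
//
//   int flag = cgls::Solve<double, cgls::CSR>(d_val, d_ptr, d_ind,
//                                             m, n, nnz, d_b, d_x,
//                                             /*shift=*/0., /*tol=*/1e-6,
//                                             /*maxit=*/100, /*quiet=*/false);
//   // flag == 0 means CGLS converged; see the return codes above.
//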
#ifndef CGLS_CUH_
#define CGLS_CUH_
#include <assert.h>
#include <stdio.h>
#include <cublas_v2.h>
#include <cusparse.h>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <algorithm>
#include <limits>
// Macro to distinguish between cublas and thrust nrm2.
// #define CGLS_USE_THRUST_NRM2
// #define CGLS_DISABLE_ERROR_CHECK
// Macro to check for CUDA errors.
#ifndef CGLS_DISABLE_ERROR_CHECK
#define CGLS_CUDA_CHECK_ERR() \
do { \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess) { \
printf("%s:%d:%s\n ERROR_CUDA: %s\n", __FILE__, __LINE__, __func__, \
cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
} while (0)
#else
#define CGLS_CUDA_CHECK_ERR()
#endif
namespace cgls {
// Data type for sparse format.
enum CGLS_ORD { CSC, CSR };
// Data type for indices. Don't change this unless Nvidia some day
// changes their API (a la MKL).
typedef int INT;
// Abstract GEMV-like operator.
template <typename T>
struct Gemv {
virtual ~Gemv() { };
virtual int operator()(char op, const T alpha, const T *x, const T beta,
T *y) const = 0;
};
// File-level functions and classes.
namespace {
// Converts 'n' or 't' to a cusparseOperation_t variable.
inline cusparseOperation_t OpToCusparseOp(char op) {
assert(op == 'n' || op == 'N' || op == 't' || op == 'T');
return (op == 'n' || op == 'N') ?
CUSPARSE_OPERATION_NON_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
}
inline cusparseOperation_t OpToCusparseCxOp(char op) {
assert(op == 'n' || op == 'N' || op == 't' || op == 'T');
return (op == 'n' || op == 'N') ?
CUSPARSE_OPERATION_NON_TRANSPOSE : CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
}
// Sparse matrix-vector multiply templates.
template <typename T, CGLS_ORD O>
class Spmv : Gemv<T> {
private:
cusparseHandle_t _handle;
cusparseMatDescr_t _descr;
INT _m, _n, _nnz;
const T *_val;
const INT *_ptr, *_ind;
public:
Spmv(INT m, INT n, INT nnz, const T *val, const INT *ptr, const INT *ind)
: _m(m), _n(n), _nnz(nnz), _val(val), _ptr(ptr), _ind(ind) {
cusparseCreate(&_handle);
cusparseCreateMatDescr(&_descr);
CGLS_CUDA_CHECK_ERR();
}
~Spmv() {
cusparseDestroy(_handle);
cusparseDestroyMatDescr(_descr);
CGLS_CUDA_CHECK_ERR();
}
int operator()(char op, const T alpha, const T *x, const T beta, T *y) const;
};
// Double CSR and CSC.
template <>
inline int Spmv<double, CSR>::operator()(char op, const double alpha,
const double *x, const double beta,
double *y) const {
cusparseStatus_t err = cusparseDcsrmv(_handle, OpToCusparseOp(op), _m, _n,
_nnz, &alpha, _descr, _val, _ptr, _ind, x, &beta, y);
CGLS_CUDA_CHECK_ERR();
return err != CUSPARSE_STATUS_SUCCESS;
}
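// For CSC storage the (val, ptr, ind) arrays describe the CSR layout of A^T,
// so the CSC specializations swap the transpose flag and the (m, n) dimensions
// before calling the csrmv routine.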
template <>
inline int Spmv<double, CSC>::operator()(char op, const double alpha,
const double *x, const double beta,
double *y) const {
cusparseOperation_t cu_op = OpToCusparseOp(op);
if (cu_op == CUSPARSE_OPERATION_TRANSPOSE)
cu_op = CUSPARSE_OPERATION_NON_TRANSPOSE;
else
cu_op = CUSPARSE_OPERATION_TRANSPOSE;
cusparseStatus_t err = cusparseDcsrmv(_handle, cu_op, _n, _m, _nnz, &alpha,
_descr, _val, _ptr, _ind, x, &beta, y);
CGLS_CUDA_CHECK_ERR();
return err != CUSPARSE_STATUS_SUCCESS;
}
// Float CSR and CSC.
template <>
inline int Spmv<float, CSR>::operator()(char op, const float alpha,
const float *x, const float beta,
float *y) const {
cusparseStatus_t err = cusparseScsrmv(_handle, OpToCusparseOp(op), _m, _n,
_nnz, &alpha, _descr, _val, _ptr, _ind, x, &beta, y);
CGLS_CUDA_CHECK_ERR();
return err != CUSPARSE_STATUS_SUCCESS;
}
template <>
inline int Spmv<float, CSC>::operator()(char op, const float alpha,
const float *x, const float beta,
float *y) const {
cusparseOperation_t cu_op = OpToCusparseOp(op);
if (cu_op == CUSPARSE_OPERATION_TRANSPOSE)
cu_op = CUSPARSE_OPERATION_NON_TRANSPOSE;
else
cu_op = CUSPARSE_OPERATION_TRANSPOSE;
cusparseStatus_t err = cusparseScsrmv(_handle, cu_op, _n, _m, _nnz, &alpha,
_descr, _val, _ptr, _ind, x, &beta, y);
CGLS_CUDA_CHECK_ERR();
return err != CUSPARSE_STATUS_SUCCESS;
}
// Double Complex CSR and CSC.
template <>
inline int Spmv<cuDoubleComplex, CSR>::
operator()(char op, const cuDoubleComplex alpha, const cuDoubleComplex *x,
const cuDoubleComplex beta, cuDoubleComplex *y) const {
cusparseStatus_t err = cusparseZcsrmv(_handle, OpToCusparseCxOp(op), _m, _n,
_nnz, &alpha, _descr, _val, _ptr, _ind, x, &beta, y);
CGLS_CUDA_CHECK_ERR();
return err != CUSPARSE_STATUS_SUCCESS;
}
template <>
inline int Spmv<cuDoubleComplex, CSC>::
operator()(char op, const cuDoubleComplex alpha, const cuDoubleComplex *x,
const cuDoubleComplex beta, cuDoubleComplex *y) const {
cusparseOperation_t cu_op = OpToCusparseCxOp(op);
if (cu_op == CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE)
cu_op = CUSPARSE_OPERATION_NON_TRANSPOSE;
else
cu_op = CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
cusparseStatus_t err = cusparseZcsrmv(_handle, cu_op, _n, _m, _nnz, &alpha,
_descr, _val, _ptr, _ind, x, &beta, y);
CGLS_CUDA_CHECK_ERR();
return err != CUSPARSE_STATUS_SUCCESS;
}
// Single Complex CSR and CSC.
template <>
inline int Spmv<cuFloatComplex, CSR>::
operator()(char op, const cuFloatComplex alpha, const cuFloatComplex *x,
const cuFloatComplex beta, cuFloatComplex *y) const {
cusparseStatus_t err = cusparseCcsrmv(_handle, OpToCusparseCxOp(op), _m, _n,
_nnz, &alpha, _descr, _val, _ptr, _ind, x, &beta, y);
CGLS_CUDA_CHECK_ERR();
return err != CUSPARSE_STATUS_SUCCESS;
}
template <>
inline int Spmv<cuFloatComplex, CSC>::
operator()(char op, const cuFloatComplex alpha, const cuFloatComplex *x,
const cuFloatComplex beta, cuFloatComplex *y) const {
cusparseOperation_t cu_op = OpToCusparseCxOp(op);
if (cu_op == CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE)
cu_op = CUSPARSE_OPERATION_NON_TRANSPOSE;
else
cu_op = CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
cusparseStatus_t err = cusparseCcsrmv(_handle, cu_op, _n, _m, _nnz, &alpha,
_descr, _val, _ptr, _ind, x, &beta, y);
CGLS_CUDA_CHECK_ERR();
return err != CUSPARSE_STATUS_SUCCESS;
}
// Class for sparse matrix and its transpose.
template <typename T, CGLS_ORD O>
class SpmvNT : Gemv<T> {
private:
Spmv<T, O> A;
Spmv<T, O> At;
public:
SpmvNT(INT m, INT n, INT nnz, const T *val_a, const INT *ptr_a,
const INT *ind_a, const T *val_at, const INT *ptr_at,
const INT *ind_at)
: A(m, n, nnz, val_a, ptr_a, ind_a),
At(n, m, nnz, val_at, ptr_at, ind_at) { }
int operator()(char op, const T alpha, const T *x, const T beta, T *y) const {
switch (O) {
case CSR:
if (op == 'n' || op == 'N')
return A('n', alpha, x, beta, y);
else
return At('n', alpha, x, beta, y);
case CSC:
if (op == 'n' || op == 'N')
return At('t', alpha, x, beta, y);
else
return A('t', alpha, x, beta, y);
default:
assert(false);
return 1;
}
}
};
// Axpy function.
inline cublasStatus_t axpy(cublasHandle_t handle, INT n, double *alpha,
const double *x, INT incx, double *y, INT incy) {
cublasStatus_t err = cublasDaxpy(handle, n, alpha, x, incx, y, incy);
CGLS_CUDA_CHECK_ERR();
return err;
}
inline cublasStatus_t axpy(cublasHandle_t handle, INT n, float *alpha,
const float *x, INT incx, float *y, INT incy) {
cublasStatus_t err = cublasSaxpy(handle, n, alpha, x, incx, y, incy);
CGLS_CUDA_CHECK_ERR();
return err;
}
inline cublasStatus_t axpy(cublasHandle_t handle, INT n, cuDoubleComplex *alpha,
const cuDoubleComplex *x, INT incx,
cuDoubleComplex *y, INT incy) {
cublasStatus_t err = cublasZaxpy(handle, n, alpha, x, incx, y, incy);
CGLS_CUDA_CHECK_ERR();
return err;
}
inline cublasStatus_t axpy(cublasHandle_t handle, INT n, cuFloatComplex *alpha,
const cuFloatComplex *x, INT incx, cuFloatComplex *y,
INT incy) {
cublasStatus_t err = cublasCaxpy(handle, n, alpha, x, incx, y, incy);
CGLS_CUDA_CHECK_ERR();
return err;
}
#ifdef CGLS_USE_THRUST_NRM2
// 2-Norm based on thrust, potentially not as stable as cuBLAS version.
template <typename T>
struct NormSquared : thrust::unary_function<T, double> {
inline __device__ double operator()(const T &x);
};
template <>
inline __device__ double NormSquared<double>::operator()(const double &x) {
return x * x;
}
template <>
inline __device__ double NormSquared<float>::operator()(const float &x) {
return static_cast<double>(x) * static_cast<double>(x);
}
template <>
inline __device__ double NormSquared<cuDoubleComplex>::
operator()(const cuDoubleComplex &x) {
return static_cast<double>(x.x) * static_cast<double>(x.x) +
static_cast<double>(x.y) * static_cast<double>(x.y);
}
template <>
inline __device__ double NormSquared<cuFloatComplex>::
operator()(const cuFloatComplex &x) {
return static_cast<double>(x.x) * static_cast<double>(x.x) +
static_cast<double>(x.y) * static_cast<double>(x.y);
}
template <typename T>
void nrm2(cublasHandle_t hdl, INT n, const T *x, double *result) {
*result = sqrt(thrust::transform_reduce(thrust::device_pointer_cast(x),
thrust::device_pointer_cast(x + n), NormSquared<T>(), 0.,
thrust::plus<double>()));
CGLS_CUDA_CHECK_ERR();
}
#else
// cuBLAS nrm2 implementation.
void nrm2(cublasHandle_t hdl, INT n, const double *x, double *result) {
cublasDnrm2(hdl, n, x, static_cast<INT>(1), result);
CGLS_CUDA_CHECK_ERR();
}
void nrm2(cublasHandle_t hdl, INT n, const float *x, double *result) {
float result_float;
cublasSnrm2(hdl, n, x, static_cast<INT>(1), &result_float);
*result = static_cast<double>(result_float);
CGLS_CUDA_CHECK_ERR();
}
void nrm2(cublasHandle_t hdl, INT n, const cuDoubleComplex *x, double *result) {
cublasDznrm2(hdl, n, x, static_cast<INT>(1), result);
CGLS_CUDA_CHECK_ERR();
}
void nrm2(cublasHandle_t hdl, INT n, const cuFloatComplex *x, double *result) {
float result_float;
cublasScnrm2(hdl, n, x, static_cast<INT>(1), &result_float);
*result = static_cast<double>(result_float);
CGLS_CUDA_CHECK_ERR();
}
#endif
// Casting from double to float, double, complex_float, and complex_double.
template <typename T>
T StaticCast(double x);
template <>
inline double StaticCast<double>(double x) {
return x;
}
template <>
inline float StaticCast<float>(double x) {
return static_cast<float>(x);
}
template <>
inline cuDoubleComplex StaticCast<cuDoubleComplex>(double x) {
return make_cuDoubleComplex(x, 0.);
}
template <>
inline cuFloatComplex StaticCast<cuFloatComplex>(double x) {
return make_cuFloatComplex(x, 0.f);
}
// Numeric limit epsilon for float, double, complex_float, and complex_double.
template <typename T>
double Epsilon();
template<>
inline double Epsilon<double>() {
return std::numeric_limits<double>::epsilon();
}
template<>
inline double Epsilon<cuDoubleComplex>() {
return std::numeric_limits<double>::epsilon();
}
template<>
inline double Epsilon<float>() {
return std::numeric_limits<float>::epsilon();
}
template<>
inline double Epsilon<cuFloatComplex>() {
return std::numeric_limits<float>::epsilon();
}
} // namespace
// Conjugate Gradient Least Squares.
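// Work vectors: r = b - A*x (residual), s = A'*r - shift*x (residual of the
// regularized normal equations), p = search direction, q = A*p.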
template <typename T, typename F>
int Solve(cublasHandle_t handle, const F& A, const INT m, const INT n,
const T *b, T *x, const double shift, const double tol,
const int maxit, bool quiet) {
// Variable declarations.
T *p, *q, *r, *s;
double gamma, normp, normq, norms, norms0, normx, xmax;
char fmt[] = "%5d %9.2e %12.5g\n";
int err = 0, k = 0, flag = 0, indefinite = 0;
// Constant declarations.
const T kNegOne = StaticCast<T>(-1.);
const T kZero = StaticCast<T>( 0.);
const T kOne = StaticCast<T>( 1.);
const T kNegShift = StaticCast<T>(-shift);
const double kEps = Epsilon<T>();
// Memory Allocation.
cudaMalloc(&p, n * sizeof(T));
cudaMalloc(&q, m * sizeof(T));
cudaMalloc(&r, m * sizeof(T));
cudaMalloc(&s, n * sizeof(T));
CGLS_CUDA_CHECK_ERR();
cudaMemcpy(r, b, m * sizeof(T), cudaMemcpyDeviceToDevice);
cudaMemcpy(s, x, n * sizeof(T), cudaMemcpyDeviceToDevice);
CGLS_CUDA_CHECK_ERR();
// r = b - A*x.
nrm2(handle, n, x, &normx);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
if (normx > 0.) {
err = A('n', kNegOne, x, kOne, r);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
if (err)
flag = 5;
}
// s = A'*r - shift*x.
err = A('t', kOne, r, kNegShift, s);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
if (err)
flag = 6;
// Initialize.
cudaMemcpy(p, s, n * sizeof(T), cudaMemcpyDeviceToDevice);
nrm2(handle, n, s, &norms);
norms0 = norms;
gamma = norms0 * norms0;
nrm2(handle, n, x, &normx);
xmax = normx;
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
if (norms < kEps)
flag = 1;
if (!quiet)
printf(" k normx resNE\n");
for (k = 0; k < maxit && !flag; ++k) {
// q = A * p.
err = A('n', kOne, p, kZero, q);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
if (err) {
flag = 5;
break;
}
// delta = norm(q)^2 + shift*norm(p)^2.
nrm2(handle, n, p, &normp);
nrm2(handle, m, q, &normq);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
double delta = normq * normq + shift * normp * normp;
if (delta <= 0.)
indefinite = 1;
if (delta == 0.)
delta = kEps;
T alpha = StaticCast<T>(gamma / delta);
T neg_alpha = StaticCast<T>(-gamma / delta);
// x = x + alpha*p.
// r = r - alpha*q.
axpy(handle, n, &alpha, p, 1, x, 1);
axpy(handle, m, &neg_alpha, q, 1, r, 1);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
// s = A'*r - shift*x.
cudaMemcpy(s, x, n * sizeof(T), cudaMemcpyDeviceToDevice);
err = A('t', kOne, r, kNegShift, s);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
if (err) {
flag = 6;
break;
}
// Compute beta.
nrm2(handle, n, s, &norms);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
double gamma1 = gamma;
gamma = norms * norms;
T beta = StaticCast<T>(gamma / gamma1);
// p = s + beta*p.
axpy(handle, n, &beta, p, 1, s, 1);
cudaMemcpy(p, s, n * sizeof(T), cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
// Convergence check.
nrm2(handle, n, x, &normx);
cudaDeviceSynchronize();
CGLS_CUDA_CHECK_ERR();
xmax = std::max(xmax, normx);
bool converged = (norms <= norms0 * tol) || (normx * tol >= 1.);
if (!quiet && (converged || k % 10 == 0))
printf(fmt, k, normx, norms / norms0);
if (converged)
break;
}
// Determine exit status.
double shrink = normx / xmax;
if (k == maxit)
flag = 2;
else if (indefinite)
flag = 3;
else if (shrink * shrink <= tol)
flag = 4;
// Free variables and return;
cudaFree(p);
cudaFree(q);
cudaFree(r);
cudaFree(s);
CGLS_CUDA_CHECK_ERR();
return flag;
}
// Sparse CGLS.
template <typename T, CGLS_ORD O>
int Solve(const T *val, const INT *ptr, const INT *ind, const INT m,
const INT n, const INT nnz, const T *b, T *x, const double shift,
const double tol, const int maxit, bool quiet) {
cublasHandle_t handle;
cublasCreate(&handle);
CGLS_CUDA_CHECK_ERR();
Spmv<T, O> A(m, n, nnz, val, ptr, ind);
int status = Solve(handle, A, m, n, b, x, shift, tol, maxit, quiet);
cublasDestroy(handle);
CGLS_CUDA_CHECK_ERR();
return status;
}
// Sparse CGLS with both A and A^T.
template <typename T, CGLS_ORD O>
int Solve(const T *val_a, const INT *ptr_a, const INT *ind_a, const T *val_at,
const INT *ptr_at, const INT *ind_at, const INT m, const INT n,
const INT nnz, const T *b, T *x, const double shift, const double tol,
const int maxit, bool quiet) {
cublasHandle_t handle;
cublasCreate(&handle);
CGLS_CUDA_CHECK_ERR();
SpmvNT<T, O> A(m, n, nnz, val_a, ptr_a, ind_a, val_at, ptr_at, ind_at);
int status = Solve(handle, A, m, n, b, x, shift, tol, maxit, quiet);
cublasDestroy(handle);
CGLS_CUDA_CHECK_ERR();
return status;
}
} // namespace cgls
#endif // CGLS_CUH_
#ifndef INCLUDE_GGNN_CACHE_CUDA_SIMPLE_KNN_CACHE_NO_SLACK_CUH_
#define INCLUDE_GGNN_CACHE_CUDA_SIMPLE_KNN_CACHE_NO_SLACK_CUH_
#include <cuda.h>
#include <cuda_runtime.h>
#include <cub/cub.cuh>
#include <limits>
#include "ggnn/utils/cuda_knn_distance.cuh"
#include "ggnn/utils/cuda_knn_utils.cuh"
template <DistanceMeasure measure,
typename ValueT, typename KeyT, int KQuery, int D, int BLOCK_DIM_X,
int VISITED_SIZE = 256, int PRIOQ_SIZE = 128, int BEST_SIZE = 32,
typename BaseT = ValueT, typename BAddrT = KeyT,
bool DIST_STATS = false, bool OVERFLOW_STATS = false>
struct SimpleKNNCacheNoSlack {
static constexpr KeyT EMPTY_KEY = (KeyT)-1;
static constexpr ValueT EMPTY_DIST = std::numeric_limits<ValueT>::infinity();
private:
static constexpr int CACHE_SIZE = BEST_SIZE + PRIOQ_SIZE + VISITED_SIZE;
static constexpr int SORTED_SIZE = BEST_SIZE + PRIOQ_SIZE;
static constexpr int DIST_ITEMS_PER_THREAD = (D - 1) / BLOCK_DIM_X + 1;
static constexpr int BEST_ITEMS_PER_THREAD =
(BEST_SIZE - 1) / BLOCK_DIM_X + 1;
static constexpr int PRIOQ_ITEMS_PER_THREAD =
(PRIOQ_SIZE - 1) / BLOCK_DIM_X + 1;
static constexpr int CACHE_ITEMS_PER_THREAD =
(CACHE_SIZE - 1) / BLOCK_DIM_X + 1;
static constexpr int SORTED_ITEMS_PER_THREAD =
(SORTED_SIZE - 1) / BLOCK_DIM_X + 1;
static constexpr int BEST_END = BEST_SIZE - 1;
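// Shared-memory layout of s_cache: [0, BEST_SIZE) holds the sorted best list,
// [BEST_SIZE, SORTED_SIZE) a ring-buffered priority queue, and
// [SORTED_SIZE, CACHE_SIZE) a ring buffer of visited keys; s_dists only covers
// the sorted (best + priority-queue) part.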
typedef Distance<measure, ValueT, KeyT, D, BLOCK_DIM_X, BaseT, BAddrT> Distance;
union SyncTempStorage {
KeyT cache;
bool flag;
};
public:
KeyT* s_cache;
ValueT* s_dists;
int& s_prioQ_head;
int& s_visited_head;
int& s_overflow_counter;
SyncTempStorage& s_sync;
Distance rs_dist_calc;
// threadIdx.x == 0: stats registers only
int dist_calc_counter;
__device__ __forceinline__ void initSharedStorage() {
__shared__ KeyT s_cache_tmp[CACHE_SIZE];
__shared__ ValueT s_dists_tmp[SORTED_SIZE];
s_cache = reinterpret_cast<KeyT*>(s_cache_tmp);
s_dists = reinterpret_cast<ValueT*>(s_dists_tmp);
}
__device__ __forceinline__ SyncTempStorage& SyncPrivateTmpStorage() {
__shared__ SyncTempStorage s_sync_tmp;
return s_sync_tmp;
}
__device__ __forceinline__ int& PrioQRingPrivateTmpStorage() {
__shared__ int s_prioQ_head_tmp;
return s_prioQ_head_tmp;
}
__device__ __forceinline__ int& CacheRingPrivateTmpStorage() {
__shared__ int s_visited_head_tmp;
return s_visited_head_tmp;
}
__device__ __forceinline__ int& OverflowPrivateTmpStorage() {
__shared__ int s_overflow_tmp;
return s_overflow_tmp;
}
__device__ __forceinline__ void init() {
for (int i = threadIdx.x; i < CACHE_SIZE; i += BLOCK_DIM_X) {
s_cache[i] = EMPTY_KEY;
}
for (int i = threadIdx.x; i < SORTED_SIZE; i += BLOCK_DIM_X) {
s_dists[i] = EMPTY_DIST;
}
if (DIST_STATS && !threadIdx.x) dist_calc_counter = 0;
if (OVERFLOW_STATS && !threadIdx.x) s_overflow_counter = 0;
if (!threadIdx.x) {
s_prioQ_head = 0;
s_visited_head = 0;
}
__syncthreads();
}
__device__ __forceinline__ SimpleKNNCacheNoSlack(const BaseT* d_base, const KeyT n)
: s_prioQ_head(PrioQRingPrivateTmpStorage()),
s_visited_head(CacheRingPrivateTmpStorage()),
s_overflow_counter(OverflowPrivateTmpStorage()),
s_sync(SyncPrivateTmpStorage()),
rs_dist_calc(d_base, n) {
initSharedStorage();
init();
}
__device__ __forceinline__ SimpleKNNCacheNoSlack(const BaseT* d_base,
const BaseT* d_query, const KeyT n)
: s_prioQ_head(PrioQRingPrivateTmpStorage()),
s_visited_head(CacheRingPrivateTmpStorage()),
s_overflow_counter(OverflowPrivateTmpStorage()),
s_sync(SyncPrivateTmpStorage()),
rs_dist_calc(d_base, d_query, n){
initSharedStorage();
init();
}
__device__ __forceinline__ bool criteria(ValueT dist) {
return dist < s_dists[BEST_SIZE - 1];
}
__device__ __forceinline__ bool is_end(int tid) {
const int prev_prioQ_ring =
(s_prioQ_head - 1 < 0) ? PRIOQ_SIZE - 1 : s_prioQ_head - 1;
return tid == BEST_END || tid == BEST_SIZE + prev_prioQ_ring;
}
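// push(): insert (key, dist) into both the sorted best list and the
// priority-queue ring in one pass. Each thread loads a slice of the sorted
// region; entries with distance >= dist are shifted one slot (the priority
// queue wraps as a ring, and segment ends are not shifted out), and the thread
// owning the insertion point writes the new entry. If the key is already
// cached, s_sync.flag is cleared and the insertion is skipped.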
__device__ __forceinline__ void push(const KeyT key, const ValueT dist) {
__syncthreads();
// Register for insertion in best and prioq
KeyT r_cache[SORTED_ITEMS_PER_THREAD];
ValueT r_dists[SORTED_ITEMS_PER_THREAD];
int r_write_item_best = -1;
int r_write_item_prioQ = -1;
if (!threadIdx.x) s_sync.flag = true;
__syncthreads();
// Load items for insertion.
for (int item = 0; item < SORTED_ITEMS_PER_THREAD && s_sync.flag; ++item) {
const int idx = item * BLOCK_DIM_X + threadIdx.x;
if (idx < SORTED_SIZE) {
r_cache[item] = s_cache[idx];
r_dists[item] = s_dists[idx];
if (r_cache[item] == key) s_sync.flag = false;
}
}
__syncthreads();
// TODO(fabi) return on s_sync.flag = true?
for (int item = 0; item < SORTED_ITEMS_PER_THREAD && s_sync.flag; ++item) {
const int idx = item * BLOCK_DIM_X + threadIdx.x;
if (idx < SORTED_SIZE) {
if (r_dists[item] >= dist) {
// Don't move if no entry or end of best or prioq.
if ((r_cache[item] != EMPTY_KEY) && !is_end(idx)) {
const int idx_next = (idx + 1 == SORTED_SIZE) ? BEST_SIZE : idx + 1;
s_cache[idx_next] = r_cache[item];
s_dists[idx_next] = r_dists[item];
}
// Find insert points.
const int idx_prev = idx - 1;
const ValueT dist_prev =
((idx_prev == -1) || (idx_prev == BEST_SIZE + s_prioQ_head - 1))
? -1.f
: (idx_prev == BEST_END) ? s_dists[SORTED_SIZE - 1]
: s_dists[idx_prev];
if (dist_prev < dist) {
if (idx < BEST_SIZE)
r_write_item_best = item;
else
r_write_item_prioQ = item;
}
}
}
}
__syncthreads();
// Insert into best and prioq.
if (r_write_item_best >= 0) {
const int idx = r_write_item_best * BLOCK_DIM_X + threadIdx.x;
s_cache[idx] = key;
s_dists[idx] = dist;
}
if (r_write_item_prioQ >= 0) {
const int idx = r_write_item_prioQ * BLOCK_DIM_X + threadIdx.x;
s_cache[idx] = key;
s_dists[idx] = dist;
}
}
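// pop(): thread 0 inspects the head of the priority-queue ring. If the entry
// still satisfies criteria(), its key is moved to the visited ring and
// broadcast through s_sync; otherwise EMPTY_KEY is broadcast. The consumed
// slot is cleared and the ring head advances (except when the queue is empty).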
__device__ __forceinline__ KeyT pop() {
__syncthreads();
if (!threadIdx.x) {
const int head_idx_prioQ = BEST_SIZE + s_prioQ_head;
const ValueT dist = s_dists[head_idx_prioQ];
if (dist == EMPTY_DIST) {
// Pop on empty prioQ.
s_sync.cache = EMPTY_KEY;
} else {
if (!criteria(dist)) {
s_sync.cache = EMPTY_KEY;
} else {
const KeyT key = s_cache[head_idx_prioQ];
s_sync.cache = key;
const int head_idx_visited = SORTED_SIZE + s_visited_head;
s_cache[head_idx_visited] = key;
s_visited_head = (s_visited_head + 1) % VISITED_SIZE;
}
s_cache[head_idx_prioQ] = EMPTY_KEY;
s_dists[head_idx_prioQ] = EMPTY_DIST;
// Move ring-buffer head forward.
s_prioQ_head = (s_prioQ_head + 1) % PRIOQ_SIZE;
}
}
__syncthreads();
return s_sync.cache;
}
__device__ __forceinline__ void fetch(KeyT* s_keys, const KeyT* d_translation,
int len, bool debug = false) {
__syncthreads();
for (int item = 0; item < CACHE_ITEMS_PER_THREAD; ++item) {
const int i = item * BLOCK_DIM_X + threadIdx.x;
if (i < CACHE_SIZE) {
const KeyT n = s_cache[i];
for (int k = 0; n != EMPTY_KEY && k < len; k++) {
if (n == s_keys[k]) {
s_keys[k] = EMPTY_KEY;
}
}
}
}
for (int k = 0; k < len; k++) {
__syncthreads();
const KeyT other_n = s_keys[k];
if (other_n == EMPTY_KEY) continue;
const KeyT other_m =
(d_translation == nullptr) ? other_n : d_translation[other_n];
const ValueT dist = rs_dist_calc.distance_synced(other_m);
if (criteria(dist)) {
push(other_n, dist);
__syncthreads();
}
}
__syncthreads();
}
__device__ __forceinline__ void transform(const KeyT* transform) {
for (int item = 0; item < CACHE_ITEMS_PER_THREAD; ++item) {
const int i = item * BLOCK_DIM_X + threadIdx.x;
if (i < BEST_SIZE) {
// transform best
KeyT key = s_cache[i];
if (key != EMPTY_KEY)
key = transform[key];
s_cache[i] = key;
// copy best into prio queue
if (i+BEST_SIZE < SORTED_SIZE) {
s_cache[i+BEST_SIZE] = key;
s_dists[i+BEST_SIZE] = s_dists[i];
}
}
else if (i < 2*BEST_SIZE && i < SORTED_SIZE) {
// do nothing (handled by previous threads)
}
else if (i < CACHE_SIZE) {
// reset remainder of the prio queue and visited cache
s_cache[i] = EMPTY_KEY;
if (i < SORTED_SIZE)
s_dists[i] = EMPTY_DIST;
}
}
// reset heads.
if (!threadIdx.x) {
s_prioQ_head = 0;
s_visited_head = 0;
}
}
__device__ __forceinline__ void write_best_graph(KeyT* d_buffer, const KeyT n,
int K, int offset = 1) {
for (int i = threadIdx.x; i < K; i += BLOCK_DIM_X) {
const KeyT idx = s_cache[i + offset];
d_buffer[n * K + i] = (idx != EMPTY_KEY) ? idx : n;
}
}
__device__ __forceinline__ void write_best(KeyT* d_buffer, const KeyT n,
int stride) {
for (int i = threadIdx.x; i < KQuery; i += BLOCK_DIM_X) {
const KeyT idx = s_cache[i];
d_buffer[n * stride + i] = idx;
}
}
template <DistanceMeasure m = measure, typename std::enable_if<m == Euclidean, int>::type = 0> // euclidean distance version
__device__ __forceinline__ float get_nn1_dist() {
return sqrtf(s_dists[1]);
}
template <DistanceMeasure m = measure, typename std::enable_if<m == Cosine, int>::type = 0> // cosine similarity version
__device__ __forceinline__ float get_nn1_dist() {
return s_dists[1];
}
__device__ __forceinline__ int get_dist_stats() { return dist_calc_counter; }
__device__ __forceinline__ int get_overflow_stats() {
return s_overflow_counter;
}
/**
* Prints the first 'len' elements of the cache. [parallel call]:
* cache.print(8);
*
*/
__device__ __forceinline__ void print(int len = CACHE_SIZE) {
__syncthreads();
if (!threadIdx.x) printf("print \n");
if (!threadIdx.x) {
printf("Cache: ring: %d KQuery: %f (BEST_SIZE -> %f) \n", s_prioQ_head,
s_dists[KQuery - 1], s_dists[BEST_SIZE - 1]);
for (int i = 0; i < len; ++i) {
if (i < BEST_SIZE) {
printf("%d -> %d %f \n", i, s_cache[i], s_dists[i]);
} else {
if (i < SORTED_SIZE) {
printf("%d -> %d %f | ", i, s_cache[i], s_dists[i]);
if (i - BEST_SIZE == s_prioQ_head) printf("X");
printf("\n");
} else {
printf("%d -> %d | ", i, s_cache[i]);
if (i - SORTED_SIZE == s_visited_head) printf("X");
printf("\n");
}
}
}
}
__syncthreads();
}
};
#endif // INCLUDE_GGNN_CACHE_CUDA_SIMPLE_KNN_CACHE_NO_SLACK_CUH_
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// TODO make it in a common file
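// Grid-stride loop over [0, n): each thread handles index i, then i + blockDim.x * gridDim.x, and so on.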
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
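// Evaluates a cubic Bezier curve in Bernstein form at parameter u in [0, 1],
// with control points p0..p3.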
template <typename T> __device__ T BEZIER_CURVE(T p0, T p1, T p2, T p3, const T u) {
return ((1. - u) * (1. - u) * (1. - u) * p0 +
3. * u * (1. - u) * (1. - u) * p1 + 3. * u * u * (1. - u) * p2 +
u * u * u * p3);
}
/**
* bilinear_interpolate
* sample one point at (x, y) from bottom_data
* @param bottom_data array of feature map
* @param height, width size of feature map
* @param y, x sample location
*/
template <typename T>
__device__ T bilinear_interpolate(const T *bottom_data, const int height,
const int width, T y, T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void
BezierAlignForward(const int nthreads, const T *bottom_data,
const T spatial_scale, const int channels, const int height,
const int width, const int pooled_height,
const int pooled_width, const int sampling_ratio,
const T *bottom_beziers, T *top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// beziers have size Nx(1+8x2)
const T *offset_bottom_beziers = bottom_beziers + n * (1 + 8 * 2);
int bezier_batch_ind = offset_bottom_beziers[0];
// Do not use rounding; this implementation detail is critical
T p0_y = offset_bottom_beziers[1] * spatial_scale;
T p0_x = offset_bottom_beziers[2] * spatial_scale;
T p1_y = offset_bottom_beziers[3] * spatial_scale;
T p1_x = offset_bottom_beziers[4] * spatial_scale;
T p2_y = offset_bottom_beziers[5] * spatial_scale;
T p2_x = offset_bottom_beziers[6] * spatial_scale;
T p3_y = offset_bottom_beziers[7] * spatial_scale;
T p3_x = offset_bottom_beziers[8] * spatial_scale;
T p4_y = offset_bottom_beziers[9] * spatial_scale;
T p4_x = offset_bottom_beziers[10] * spatial_scale;
T p5_y = offset_bottom_beziers[11] * spatial_scale;
T p5_x = offset_bottom_beziers[12] * spatial_scale;
T p6_y = offset_bottom_beziers[13] * spatial_scale;
T p6_x = offset_bottom_beziers[14] * spatial_scale;
T p7_y = offset_bottom_beziers[15] * spatial_scale;
T p7_x = offset_bottom_beziers[16] * spatial_scale;
const T *offset_bottom_data =
bottom_data + (bezier_batch_ind * channels + c) * height * width;
// compute the coords
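// u walks along the top (p0..p3) and bottom (p4..p7) cubic Beziers;
// v interpolates linearly between the two sampled curve points.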
const T u = pw / static_cast<T>(pooled_width);
const T v = ph / static_cast<T>(pooled_height);
const T y0 = BEZIER_CURVE(p0_y, p1_y, p2_y, p3_y, u);
const T x0 = BEZIER_CURVE(p0_x, p1_x, p2_x, p3_x, u);
const T y1 = BEZIER_CURVE(p4_y, p5_y, p6_y, p7_y, u);
const T x1 = BEZIER_CURVE(p4_x, p5_x, p6_x, p7_x, u);
const T y = y1 * v + y0 * (1. - v);
const T x = x1 * v + x0 * (1. - v);
top_data[index] =
bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
}
}
template <typename T>
__device__ void
bilinear_interpolate_gradient(const int height, const int width, T y, T x,
T &w1, T &w2, T &w3, T &w4, int &x_low,
int &x_high, int &y_low, int &y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void BezierAlignBackwardFeature(
const int nthreads, const T *top_diff, const int num_beziers,
const T spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int sampling_ratio, T *bottom_diff, const T *bottom_beziers) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T *offset_bottom_beziers = bottom_beziers + n * (1 + 8 * 2);
int bezier_batch_ind = offset_bottom_beziers[0];
// Do not use rounding; this implementation detail is critical
T p0_y = offset_bottom_beziers[1] * spatial_scale;
T p0_x = offset_bottom_beziers[2] * spatial_scale;
T p1_y = offset_bottom_beziers[3] * spatial_scale;
T p1_x = offset_bottom_beziers[4] * spatial_scale;
T p2_y = offset_bottom_beziers[5] * spatial_scale;
T p2_x = offset_bottom_beziers[6] * spatial_scale;
T p3_y = offset_bottom_beziers[7] * spatial_scale;
T p3_x = offset_bottom_beziers[8] * spatial_scale;
T p4_y = offset_bottom_beziers[9] * spatial_scale;
T p4_x = offset_bottom_beziers[10] * spatial_scale;
T p5_y = offset_bottom_beziers[11] * spatial_scale;
T p5_x = offset_bottom_beziers[12] * spatial_scale;
T p6_y = offset_bottom_beziers[13] * spatial_scale;
T p6_x = offset_bottom_beziers[14] * spatial_scale;
T p7_y = offset_bottom_beziers[15] * spatial_scale;
T p7_x = offset_bottom_beziers[16] * spatial_scale;
T *offset_bottom_diff =
bottom_diff + (bezier_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T *offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
// compute the coords
const T u = pw / static_cast<T>(pooled_width);
const T v = ph / static_cast<T>(pooled_height);
const T y0 = BEZIER_CURVE(p0_y, p1_y, p2_y, p3_y, u);
const T x0 = BEZIER_CURVE(p0_x, p1_x, p2_x, p3_x, u);
const T y1 = BEZIER_CURVE(p4_y, p5_y, p6_y, p7_y, u);
const T x1 = BEZIER_CURVE(p4_x, p5_x, p6_x, p7_x, u);
const T y = y1 * v + y0 * (1. - v);
const T x = x1 * v + x0 * (1. - v);
bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low,
x_high, y_low, y_high, index);
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(offset_bottom_diff + y_low * width + x_low,
static_cast<T>(top_diff_this_bin * w1));
atomicAdd(offset_bottom_diff + y_low * width + x_high,
static_cast<T>(top_diff_this_bin * w2));
atomicAdd(offset_bottom_diff + y_high * width + x_low,
static_cast<T>(top_diff_this_bin * w3));
atomicAdd(offset_bottom_diff + y_high * width + x_high,
static_cast<T>(top_diff_this_bin * w4));
}
} // CUDA_1D_KERNEL_LOOP
} // BezierAlignBackward
at::Tensor
BezierAlign_forward_cuda(const at::Tensor &input, const at::Tensor &beziers,
const float spatial_scale, const int pooled_height,
const int pooled_width, const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(beziers.type().is_cuda(), "beziers must be a CUDA tensor");
auto num_beziers = beziers.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_beziers, channels, pooled_height, pooled_width},
input.options());
auto output_size = num_beziers * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "BezierAlign_forward", [&] {
BezierAlignForward<scalar_t><<<grid, block, 0, stream>>>(
output_size, input.contiguous().data<scalar_t>(), spatial_scale,
channels, height, width, pooled_height, pooled_width, sampling_ratio,
beziers.contiguous().data<scalar_t>(), output.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor
BezierAlign_backward_cuda(const at::Tensor &grad, const at::Tensor &beziers,
const float spatial_scale, const int pooled_height,
const int pooled_width, const int batch_size,
const int channels, const int height, const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(beziers.type().is_cuda(), "beziers must be a CUDA tensor");
auto num_beziers = beziers.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "BezierAlign_backward", [&] {
BezierAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(), grad.contiguous().data<scalar_t>(), num_beziers,
spatial_scale, channels, height, width, pooled_height, pooled_width,
sampling_ratio, grad_input.data<scalar_t>(),
beziers.contiguous().data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return grad_input;
}
#include "common.cuh"
#include <kat/on_device/collaboration/grid.cuh>
#include <kat/on_device/collaboration/block.cuh>
#include <kat/on_device/collaboration/warp.cuh>
#include <kat/on_device/atomics.cuh>
using std::size_t;
using kat::warp_size;
using fake_bool = int8_t; // so as not to have trouble with vector<bool>
#if __cplusplus < 201701L
#include <experimental/optional>
template <typename T>
using optional = std::experimental::optional<T>;
#else
#include <optional>
template <typename T>
using optional = std::optional<T>;
#endif
template <typename T>
const auto make_exact_comparison { optional<T>{} };
namespace kcw = ::kat::collaborative::warp;
namespace klcw = ::kat::linear_grid::collaborative::warp;
std::ostream& operator<<(std::ostream& os, klcw::detail::predicate_computation_length_slack_t ss)
{
switch(ss) {
case klcw::detail::predicate_computation_length_slack_t::has_no_slack:
os << "has_no_slack"; break;
case klcw::detail::predicate_computation_length_slack_t::may_have_arbitrary_slack:
os << "may_have_arbitrary_slack"; break;
case klcw::detail::predicate_computation_length_slack_t::may_have_full_warps_of_slack:
default:
os << "may_have_full_warps_of_slack:";
}
return os;
}
namespace kernels {
template <typename F, typename T, typename... Is>
__global__ void execute_testcase(
F testcase_device_function,
size_t num_values_to_populate,
T* __restrict__ values_to_populate,
const Is* __restrict__ ... inputs
)
{
testcase_device_function(num_values_to_populate, values_to_populate, inputs...);
}
} // namespace kernels
namespace kat {
namespace linear_grid {
namespace collaborative {
namespace warp {
template <typename T>
std::ostream& operator<<(std::ostream& os, search_result_t<T> sr)
{
if (not sr.is_set()) {
return os << "(not found)";
}
return os << "value " << sr.value << " in lane " << sr.lane_index;
}
template <typename T>
KAT_FHD bool operator==(const search_result_t<T>& lhs, const search_result_t<T>& rhs)
{
return
(lhs.lane_index == rhs.lane_index)
and ( (not lhs.is_set() ) or (lhs.value == rhs.value) );
}
} // namespace warp
} // namespace collaborative
} // namespace linear_grid
} // namespace kat
template <typename T>
std::size_t set_width_for_up_to(T max)
{
// assert(std::is_integral<I>::value, "Only integer types supported for now");
std::stringstream ss;
ss << std::dec << max;
return ss.str().length();
}
namespace detail {
template <typename T>
auto tolerance_gadget(std::true_type, T x, optional<T> tolerance) {
auto eps = tolerance.value_or(0);
return doctest::Approx(x).epsilon(eps);
}
template <typename T>
T tolerance_gadget(std::false_type, T x, optional<T>) { return x; }
} // namespace detail
template <typename T>
auto tolerance_gadget(T x, optional<T> tolerance)
{
constexpr const auto is_arithmetic = std::is_arithmetic< std::decay_t<T> >::value;
return
detail::tolerance_gadget(std::integral_constant<bool, is_arithmetic>{}, x, tolerance);
}
template <typename T, typename F, typename... Is>
void check_results(
std::string title,
size_t num_values_to_check,
const T* __restrict__ actual_values,
F expected_value_retriever,
optional<T> comparison_tolerance_fraction,
const Is* __restrict__... inputs)
{
std::stringstream ss;
auto index_width = set_width_for_up_to(num_values_to_check);
// TODO: Consider using the maximum/minimum result values to set field widths.
for(size_t i = 0; i < num_values_to_check; i++) {
ss.str("");
ss
<< "Assertion " << std::setw(index_width) << (i+1) << " for " << title
// << " :\n"
<< "(" << std::make_tuple(inputs[i]...) << ")"
;
auto mismatch_message { ss.str() };
if (comparison_tolerance_fraction) {
const auto& actual = actual_values[i];
const auto expected = tolerance_gadget(expected_value_retriever(i), comparison_tolerance_fraction);
CHECK_MESSAGE(actual == expected, mismatch_message);
}
else {
const auto& actual = actual_values[i];
const auto expected = expected_value_retriever(i);
CHECK_MESSAGE(actual == expected, mismatch_message);
}
}
}
template <typename T, typename F, typename... Is>
void check_results(
size_t num_values_to_check,
const T* __restrict__ actual_values,
F expected_value_retriever,
optional<T> comparison_tolerance_fraction,
const Is* __restrict__... inputs)
{
return check_results(
std::string("testcase ") + doctest::current_test_name(),
num_values_to_check,
actual_values,
expected_value_retriever,
comparison_tolerance_fraction,
inputs...);
}
template <typename T>
struct tag {};
/**
* @brief Executes a testcase intended to make certain checks using a GPU kernel
* which produces the values to check for.
*
* @note The actual checks are eventually conducted on the host side, since doctest
* code can't actually do anything useful on the GPU. So on the GPU side we "merely"
* compute the values to check and let the test logic perform the actual comparison later
* on.
*/
template <typename F, typename K, typename T, typename... Is, size_t... Indices>
auto execute_testcase_on_gpu(
tag<T>,
std::index_sequence<Indices...>,
K testcase_kernel,
F testcase_device_function,
cuda::launch_configuration_t launch_config,
size_t num_values_to_populate,
Is* __restrict__ ... inputs)
{
cuda::device_t device { cuda::device::current::get() };
auto device_side_results { cuda::memory::device::make_unique<T[]>(device, num_values_to_populate) };
cuda::memory::device::zero(device_side_results.get(), num_values_to_populate * sizeof(T)); // just to be on the safe side
auto host_side_results { std::vector<T>(num_values_to_populate) };
auto make_device_side_input = [&device, num_values_to_populate](auto input, size_t n) {
using input_type = std::remove_reference_t<decltype(*input)>;
auto device_side_input = cuda::memory::device::make_unique<input_type[]>(device, n);
cuda::memory::copy(device_side_input.get(), input, n * sizeof(input_type));
return std::move(device_side_input);
};
auto device_side_inputs = std::make_tuple( make_device_side_input(inputs, num_values_to_populate)... );
ignore(device_side_inputs); // for the case of no inputs
cuda::launch(
testcase_kernel,
launch_config,
testcase_device_function,
num_values_to_populate,
device_side_results.get(),
std::get<Indices>(device_side_inputs).get()... );
cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(T) * num_values_to_populate);
return host_side_results;
}
template <typename F, typename ExpectedResultRetriever, typename T, typename... Is>
void execute_non_uniform_testcase_on_gpu_and_check(
F testcase_device_function,
ExpectedResultRetriever expected_value_retriever,
size_t num_values_to_populate,
cuda::grid::dimensions_t grid_dimensions,
cuda::grid::block_dimensions_t block_dimensions,
optional<T> comparison_tolerance_fraction,
Is* __restrict__ ... inputs)
{
auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) };
auto host_side_results = execute_testcase_on_gpu(
tag<T>{},
typename std::make_index_sequence<sizeof...(Is)> {},
kernels::execute_testcase<F, T, Is...>,
testcase_device_function,
launch_config,
num_values_to_populate,
inputs...
);
check_results (
num_values_to_populate,
// perhaps add another parameter for specific testcase details?
host_side_results.data(),
expected_value_retriever,
comparison_tolerance_fraction,
inputs...);
}
template <typename F, typename T, typename... Is>
auto execute_non_uniform_testcase_on_gpu(
tag<T>,
F testcase_device_function,
size_t num_values_to_populate,
cuda::grid::dimensions_t grid_dimensions,
cuda::grid::block_dimensions_t block_dimensions,
Is* __restrict__ ... inputs)
{
auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) };
return execute_testcase_on_gpu(
tag<T>{},
typename std::make_index_sequence<sizeof...(Is)> {},
kernels::execute_testcase<F, T, Is...>,
testcase_device_function,
launch_config,
num_values_to_populate,
inputs...
);
}
TEST_SUITE("warp-level - general grid") {
TEST_CASE("all_lanes_satisfy") {
using predicate_type = int;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0);
case 1: return predicate_type(1);
case 2: return predicate_type(lane_id % 2);
default: return predicate_type(lane_id != 7);
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
predicate_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::warp::id_in_block(), gi::lane::id()) };
auto obtained_value { kcw::all_lanes_satisfy(thread_value) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
predicate_type warp_values[warp_size];
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
bool all_satisfy { true };
for(unsigned lane_id = 0; lane_id < warp_size; lane_id++) {
warp_values[lane_id] = make_thread_value(warp_id, lane_id);
all_satisfy = all_satisfy and warp_values[lane_id];
}
return predicate_type{all_satisfy};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<predicate_type>
);
}
TEST_CASE("no_lanes_satisfy") {
using predicate_type = int;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0);
case 1: return predicate_type(1);
case 2: return predicate_type(lane_id % 2);
default: return predicate_type(lane_id != 7);
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
predicate_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::warp::id_in_block(), gi::lane::id()) };
auto obtained_value { kcw::no_lanes_satisfy(thread_value) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
predicate_type warp_values[warp_size];
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
bool any_satisfy { false };
for(unsigned lane_id = 0; lane_id < warp_size; lane_id++) {
warp_values[lane_id] = make_thread_value(warp_id, lane_id);
any_satisfy = any_satisfy or warp_values[lane_id];
}
return predicate_type{not any_satisfy};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<predicate_type>
);
}
TEST_CASE("all_lanes_agree_on") {
using predicate_type = int;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0);
case 1: return predicate_type(1);
case 2: return predicate_type(lane_id % 2);
default: return predicate_type(lane_id != 7);
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
predicate_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::warp::id_in_block(), gi::lane::id()) };
auto obtained_value { kcw::all_lanes_agree_on(thread_value) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
predicate_type warp_values[warp_size];
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
bool any_satisfy { false };
bool any_dont_satisfy { false };
for(unsigned lane_id = 0; lane_id < warp_size; lane_id++) {
warp_values[lane_id] = make_thread_value(warp_id, lane_id);
any_satisfy = any_satisfy or warp_values[lane_id];
any_dont_satisfy = any_dont_satisfy or (not warp_values[lane_id]);
}
return predicate_type{
(any_satisfy and not any_dont_satisfy) or
(any_dont_satisfy and not any_satisfy)
};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<predicate_type>
);
}
TEST_CASE("some_lanes_satisfy") {
using predicate_type = int;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0);
case 1: return predicate_type(1);
case 2: return predicate_type(lane_id % 2);
default: return predicate_type(lane_id != 7);
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
predicate_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::warp::id_in_block(), gi::lane::id()) };
auto obtained_value { kcw::some_lanes_satisfy(thread_value) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
predicate_type warp_values[warp_size];
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
bool any_satisfy { false };
for(unsigned lane_id = 0; lane_id < warp_size; lane_id++) {
warp_values[lane_id] = make_thread_value(warp_id, lane_id);
any_satisfy = any_satisfy or warp_values[lane_id];
}
return predicate_type{any_satisfy};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<predicate_type>
);
}
TEST_CASE("num_lanes_agreeing_on") {
using predicate_type = int;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0);
case 1: return predicate_type(1);
case 2: return predicate_type(lane_id % 2);
default: return predicate_type(lane_id != 7);
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
predicate_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::warp::id_in_block(), gi::lane::id()) };
auto obtained_value { kcw::num_lanes_agreeing_on(thread_value) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
auto lane_id { global_thread_id % warp_size };
auto thread_value = make_thread_value(warp_id, lane_id);
auto num_lanes_agreeing { 0 };
for(unsigned other_lane_id = 0; other_lane_id < warp_size; other_lane_id++) {
if (thread_value == make_thread_value(warp_id, other_lane_id))
{ num_lanes_agreeing++; }
}
return predicate_type{num_lanes_agreeing};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<predicate_type>
);
}
TEST_CASE("majority_vote") {
using predicate_type = int;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0);
case 1: return predicate_type(1);
case 2: return predicate_type(lane_id % 2);
default: return predicate_type(lane_id != 7);
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
predicate_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::warp::id_in_block(), gi::lane::id()) };
auto obtained_value { kcw::majority_vote(thread_value) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
auto lane_id { global_thread_id % warp_size };
auto thread_value = make_thread_value(warp_id, lane_id);
int vote_balance { 0 };
for(unsigned other_lane_id = 0; other_lane_id < warp_size; other_lane_id++) {
vote_balance += make_thread_value(warp_id, other_lane_id) ? 1 : -1;
}
return predicate_type{vote_balance > 0};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<predicate_type>
);
}
#if 0
#if !defined(__CUDA_ARCH__) or __CUDA_ARCH__ >= 700
TEST_CASE("in_unique_lane_with") {
cuda::device_t device { cuda::device::current::get() };
if (device.properties().compute_capability() < cuda::device::make_compute_capability(7,0)) {
return;
}
using predicate_type = int;
using datum_type = uint32_t;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return datum_type{12};
case 1: return datum_type{34};
case 2: return lane_id;
default: return (lane_id == 5 ? lane_id + 20 : lane_id);
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
predicate_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::warp::id_in_block(), gi::lane::id()) };
auto obtained_value { kcw::in_unique_lane_with(thread_value) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
auto lane_id { global_thread_id % warp_size };
auto thread_value = make_thread_value(warp_id, lane_id);
bool am_unique { true };
for(unsigned other_lane_id = 0; other_lane_id < warp_size; other_lane_id++) {
if ((other_lane_id != lane_id) and (make_thread_value(warp_id, other_lane_id) == thread_value)) {
am_unique = false;
}
}
return predicate_type{am_unique};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<predicate_type>
);
}
#endif // !defined(__CUDA_ARCH__) or __CUDA_ARCH__ >= 700
#endif // if 0
TEST_CASE("get_from_lane") {
using checked_value_type = uint32_t;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned thread_id)
{
return checked_value_type((thread_id + 1) * 10);
};
auto make_source_lane = [] KAT_HD (unsigned thread_id)
{
auto warp_id = thread_id / warp_size;
return (warp_id + 1) % warp_size;
};
auto testcase_device_function =
[=] KAT_DEV (size_t, checked_value_type* thread_obtained_values)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::thread::id()) };
auto source_lane { make_source_lane(gi::thread::id()) };
auto obtained_value { kcw::get_from_lane(thread_value, source_lane) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto thread_id { global_thread_id % block_dimensions.volume() };
auto source_lane { make_source_lane(thread_id) };
auto warp_id { thread_id / warp_size };
auto source_thread_id { warp_id * warp_size + source_lane };
checked_value_type source_thread_value { make_thread_value(source_thread_id) };
return source_thread_value;
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("get_from_first_lane") {
using checked_value_type = uint32_t;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned thread_id)
{
return checked_value_type((thread_id + 1) * 10);
};
auto testcase_device_function =
[=] KAT_DEV (size_t, checked_value_type* thread_obtained_values)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::thread::id()) };
auto obtained_value { kcw::get_from_first_lane(thread_value) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto thread_id { global_thread_id % block_dimensions.volume() };
auto warp_id { thread_id / warp_size };
auto source_thread_id { warp_id * warp_size + 0 };
checked_value_type source_thread_value { make_thread_value(source_thread_id) };
return source_thread_value;
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("get_from_last_lane") {
using checked_value_type = uint32_t;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned thread_id)
{
return checked_value_type((thread_id + 1) * 10);
};
auto testcase_device_function =
[=] KAT_DEV (size_t, checked_value_type* thread_obtained_values)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::thread::id()) };
auto obtained_value { kcw::get_from_last_lane(thread_value) };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto thread_id { global_thread_id % block_dimensions.volume() };
auto warp_id { thread_id / warp_size };
auto source_thread_id { warp_id * warp_size + (warp_size - 1) };
checked_value_type source_thread_value { make_thread_value(source_thread_id) };
return source_thread_value;
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("have_a_single_lane_compute") {
using checked_value_type = uint32_t;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned thread_id)
{
return checked_value_type((thread_id + 1) * 10);
};
auto make_source_lane = [] KAT_HD (unsigned thread_id)
{
auto warp_id = thread_id / warp_size;
return (warp_id + 1) % warp_size;
};
auto testcase_device_function =
[=] KAT_DEV (size_t, checked_value_type* thread_obtained_values)
{
namespace gi = kat::grid_info;
auto source_lane { make_source_lane(gi::thread::id()) };
auto obtained_value =
kcw::have_a_single_lane_compute(
[=]() { return make_thread_value(gi::thread::id()); },
source_lane);
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto thread_id { global_thread_id % block_dimensions.volume() };
auto source_lane { make_source_lane(thread_id) };
auto warp_id { thread_id / warp_size };
auto source_thread_id { warp_id * warp_size + source_lane };
checked_value_type source_thread_value { make_thread_value(source_thread_id) };
return source_thread_value;
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("have_first_lane_compute") {
using checked_value_type = uint32_t;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned thread_id)
{
return checked_value_type((thread_id + 1) * 10);
};
auto testcase_device_function =
[=] KAT_DEV (size_t, checked_value_type* thread_obtained_values)
{
namespace gi = kat::grid_info;
auto obtained_value =
kcw::have_first_lane_compute(
[=]() { return make_thread_value(gi::thread::id()); }
);
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto thread_id { global_thread_id % block_dimensions.volume() };
auto warp_id { thread_id / warp_size };
auto source_thread_id { warp_id * warp_size + 0 };
checked_value_type source_thread_value { make_thread_value(source_thread_id) };
return source_thread_value;
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("have_last_lane_compute") {
using checked_value_type = uint32_t;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 3 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned thread_id)
{
return checked_value_type((thread_id + 1) * 10);
};
auto testcase_device_function =
[=] KAT_DEV (size_t, checked_value_type* thread_obtained_values)
{
namespace gi = kat::grid_info;
auto obtained_value =
kcw::have_last_lane_compute(
[=]() { return make_thread_value(gi::thread::id()); }
);
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto thread_id { global_thread_id % block_dimensions.volume() };
auto warp_id { thread_id / warp_size };
auto source_thread_id { warp_id * warp_size + (warp_size - 1) };
checked_value_type source_thread_value { make_thread_value(source_thread_id) };
return source_thread_value;
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("first_lane_satisfying") {
using predicate_type = int;
using checked_value_type = uint32_t;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 4 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0);
case 1: return predicate_type(1);
case 2: return predicate_type(lane_id > 5);
default: return predicate_type(lane_id != 7);
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
checked_value_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
auto thread_value { make_thread_value(gi::warp::id_in_block(), gi::lane::id()) };
auto obtained_value { kcw::first_lane_satisfying(thread_value) };
// if (threadIdx.x >= 64) printf("Thread %u value %u obtained %u\n", gi::thread::global_id(), thread_value, obtained_value);
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
// auto lane_id { global_thread_id % warp_size };
for(unsigned other_lane_id = 0; other_lane_id < warp_size; other_lane_id++) {
if (make_thread_value(warp_id, other_lane_id))
{ return checked_value_type{other_lane_id}; }
}
return checked_value_type{warp_size};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("get_active_lanes") {
using kat::lane_mask_t;
constexpr const lane_mask_t default_lane_mask_for_threads_who_go_inactive = 0xDEADBEEFu;
using predicate_type = bool;
using checked_value_type = lane_mask_t;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 4 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto determine_whether_to_stay_active = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0); // all go inactive
case 1: return predicate_type(1); // all stay active
case 2: return predicate_type(lane_id > 5); // lanes 6 and up stay active
default: return predicate_type(lane_id != 7); // only lane 7 goes inactive
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
checked_value_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
bool should_stay_active { determine_whether_to_stay_active(gi::warp::id_in_block(), gi::lane::id()) };
// if (threadIdx.x < 32)
// printf("Thread %u %s stay active\n", gi::thread::id(), (should_stay_active ? "SHOULD" : "SHOULD NOT"));
if (not should_stay_active) {
thread_obtained_values[gi::thread::global_id()] =
default_lane_mask_for_threads_who_go_inactive;
return;
}
auto obtained_value { kcw::get_active_lanes() };
// if (threadIdx.x >= 64) printf("Thread %u value %u obtained %u\n", gi::thread::global_id(), thread_value, obtained_value);
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
auto lane_id { global_thread_id % warp_size };
if (not determine_whether_to_stay_active(warp_id, lane_id)) {
return default_lane_mask_for_threads_who_go_inactive;
}
lane_mask_t mask { 0 };
for(unsigned other_lane_id = 0; other_lane_id < warp_size; other_lane_id++) {
if (determine_whether_to_stay_active(warp_id, other_lane_id)) {
mask |= (1u << other_lane_id);
}
}
return mask;
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("num_active_lanes") {
using kat::lane_mask_t;
using checked_value_type = unsigned;
constexpr const checked_value_type invalid_num_active_lanes = 33;
using predicate_type = bool;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 4 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto determine_whether_to_stay_active = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0); // all go inactive
case 1: return predicate_type(1); // all stay active
case 2: return predicate_type(lane_id > 5); // lanes 6 and up stay active
default: return predicate_type(lane_id != 7); // only lane 7 goes inactive
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
checked_value_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
bool should_stay_active { determine_whether_to_stay_active(gi::warp::id_in_block(), gi::lane::id()) };
if (not should_stay_active) {
thread_obtained_values[gi::thread::global_id()] = invalid_num_active_lanes;
return;
}
auto obtained_value { kcw::num_active_lanes() };
// if (threadIdx.x >= 64) printf("Thread %u value %u obtained %u\n", gi::thread::global_id(), thread_value, obtained_value);
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
auto lane_id { global_thread_id % warp_size };
if (not determine_whether_to_stay_active(warp_id, lane_id)) {
return invalid_num_active_lanes;
}
auto active_count { 0u };
for(unsigned other_lane_id = 0; other_lane_id < warp_size; other_lane_id++) {
if (determine_whether_to_stay_active(warp_id, other_lane_id)) {
active_count++;
}
}
return checked_value_type{active_count};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("am_leader_lane") {
using kat::lane_mask_t;
using checked_value_type = fake_bool;
// Not using an actual bool, to avoid trouble with __nv_bool and with std::vector<bool>
using predicate_type = bool;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 4 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_warps_in_grid = num_total_threads / warp_size;
auto num_values_to_populate = num_total_threads;
auto determine_whether_to_stay_active = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0); // all go inactive
case 1: return predicate_type(1); // all stay active
case 2: return predicate_type(lane_id > 5); // lanes 6 and up stay active
default: return predicate_type(lane_id != 7); // only lane 7 goes inactive
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
checked_value_type* thread_obtained_values
)
{
namespace gi = kat::linear_grid::grid_info;
bool should_stay_active { determine_whether_to_stay_active(gi::warp::id_in_block(), gi::lane::id()) };
if (not should_stay_active) {
thread_obtained_values[gi::thread::global_id()] = false;
// if (threadIdx.x < 32) printf("Thread %u goes inactive\n", gi::thread::id());
return;
}
auto obtained_value { kcw::am_leader_lane() };
// if (threadIdx.x < 32) printf("Thread %u value believes it %s the leader\n", gi::thread::id(), (obtained_value ? "IS" : "IS NOT"));
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
// Note: Deviating from our usual checks, since the leader selection is not required
// to choose the first thread - so, we can't compare against a generated value;
// and the checks are at the warp level
auto thread_obtained_values = execute_non_uniform_testcase_on_gpu(
tag<checked_value_type>{},
testcase_device_function,
num_values_to_populate,
grid_dimensions,
block_dimensions
);
for(size_t warp_id = 0; warp_id < num_warps_in_grid; warp_id++) {
unsigned num_active_lanes { 0 };
for(unsigned lane_id = 0; lane_id < warp_size; lane_id++) {
if (determine_whether_to_stay_active(warp_id, lane_id)) { num_active_lanes++; }
}
auto warp_results_begin = thread_obtained_values.data() + warp_id * warp_size;
auto warp_results_end = warp_results_begin + warp_size;
auto num_presumptive_leaders = std::count(warp_results_begin, warp_results_end, true);
// std::cout << "warp " << warp_id << ": num_presumptive_leaders = " << num_presumptive_leaders << ", num_active_lanes = " << num_active_lanes << '\n';
bool have_presumptive_leaders = (num_presumptive_leaders > 0);
bool have_active_lanes = (num_active_lanes > 0);
if (have_active_lanes and not have_presumptive_leaders) {
auto error_message = "In warp " + std::to_string(warp_id) + " of the grid, some lanes were active, but no lane recognized itself as the leader";
CHECK_MESSAGE(false, error_message);
}
if (num_presumptive_leaders > 1) {
auto error_message = "In warp " + std::to_string(warp_id) + " of the grid, multiple lanes concluded they were the single leader lane";
CHECK_MESSAGE(false, error_message);
}
}
}
TEST_CASE("index_among_active_lanes") {
using kat::lane_mask_t;
using checked_value_type = unsigned;
constexpr const checked_value_type invalid_index = 33;
using predicate_type = bool;
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 4 };
// TODO: What about when the last warp is not full?
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto determine_whether_to_stay_active = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return predicate_type(0); // all go inactive
case 1: return predicate_type(1); // all stay active
case 2: return predicate_type(lane_id > 5); // lanes 6 and up stay active
default: return predicate_type(lane_id != 7); // only lane 7 goes inactive
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
checked_value_type* thread_obtained_values
)
{
namespace gi = kat::grid_info;
bool should_stay_active { determine_whether_to_stay_active(gi::warp::id_in_block(), gi::lane::id()) };
if (not should_stay_active) {
thread_obtained_values[gi::thread::global_id()] = invalid_index;
return;
}
auto obtained_value { kcw::index_among_active_lanes() };
thread_obtained_values[gi::thread::global_id()] = obtained_value;
};
auto expected_value_retriever = [=] (size_t global_thread_id) {
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
auto lane_id { global_thread_id % warp_size };
if (not determine_whether_to_stay_active(warp_id, lane_id)) {
return invalid_index;
}
auto num_active_before_this_thread { 0u };
for(unsigned other_lane_id = 0; other_lane_id < lane_id; other_lane_id++) {
if (determine_whether_to_stay_active(warp_id, other_lane_id)) {
num_active_before_this_thread++;
}
}
return checked_value_type{num_active_before_this_thread};
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("at_warp_stride")
{
using checked_value_type = uint32_t;
cuda::grid::dimensions_t num_grid_blocks { 1 };
cuda::grid::block_dimensions_t num_threads_per_block { warp_size * 3 }; // Meeting full warp constraint
auto num_grid_warps = num_grid_blocks.volume() * num_threads_per_block.volume() / warp_size;
size_t length_to_cover_per_warp { 71 };
// We don't actually create input data, we just need each element in the
// range 0 ... length_to_cover-1 to be attended to.
//
// In this test case - it's 0 ... length_to_cover-1 attended to by _each_ of the warps.
auto num_values_to_populate = length_to_cover_per_warp * num_grid_warps;
auto testcase_device_function =
[length_to_cover_per_warp] KAT_DEV (
size_t num_grid_threads,
checked_value_type* pos_attendent_thread_indices
)
{
namespace gi = kat::grid_info;
auto offset_into_attendant_array = length_to_cover_per_warp * gi::warp::id_in_block();
auto f_inner = [&] (size_t pos) {
pos_attendent_thread_indices[offset_into_attendant_array + pos] = gi::thread::id_in_grid();
};
kcw::at_warp_stride(length_to_cover_per_warp, f_inner);
};
auto expected_value_retriever = [=] (size_t pos) {
// Which thread processes position pos?
auto intra_warp_pos = pos % length_to_cover_per_warp;
auto processing_warp_index = pos / length_to_cover_per_warp;
auto processing_lane_index = intra_warp_pos % warp_size;
return checked_value_type(processing_lane_index + processing_warp_index * warp_size);
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate, num_grid_blocks, num_threads_per_block,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("active_lanes_atomically_increment")
{
using incremeted_type = int; // TODO: Should we also test other types?
using checked_value_type = incremeted_type;
using predicate_type = bool;
cuda::grid::dimensions_t grid_dimensions { 2 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 5 };
auto num_grid_threads = grid_dimensions.volume() * block_dimensions.volume();
constexpr const incremeted_type invalid_value_for_inactives { -0xB7AB7A };
auto num_values_to_populate = num_grid_threads;
cuda::device_t device { cuda::device::current::get() };
auto device_side_increment_target { cuda::memory::device::make_unique<incremeted_type>(device) };
incremeted_type* device_side_increment_target_raw = device_side_increment_target.get();
cuda::memory::zero(device_side_increment_target_raw, sizeof(incremeted_type));
auto determine_whether_to_stay_active = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id % 5) {
case 0: return predicate_type(0); // all go inactive
case 1: return predicate_type(1); // all stay active
case 2: return predicate_type(lane_id > 5); // lanes 6 and up stay active
case 3: return predicate_type(lane_id % 2); // odd lanes stay active
default: return predicate_type(lane_id != 7); // only lane 7 goes inactive
}
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
checked_value_type* thread_values_before_increment
)
{
namespace gi = kat::grid_info;
if (not determine_whether_to_stay_active(gi::warp::id_in_block(), gi::lane::id() )) {
thread_values_before_increment[gi::thread::global_id()] = invalid_value_for_inactives;
return;
}
thread_values_before_increment[gi::thread::global_id()] =
kcw::active_lanes_atomically_increment(device_side_increment_target_raw);
};
incremeted_type expected_num_active_threads { 0 };
for(incremeted_type global_thread_id = 0; global_thread_id < num_grid_threads; global_thread_id++) {
auto in_block_thread_id = global_thread_id % block_dimensions.volume();
auto warp_id = in_block_thread_id / warp_size;
auto lane_id = global_thread_id % warp_size;
if (determine_whether_to_stay_active(warp_id, lane_id)) { expected_num_active_threads++; }
}
auto thread_values_before_increment = execute_non_uniform_testcase_on_gpu(
tag<incremeted_type>{},
testcase_device_function,
num_values_to_populate,
grid_dimensions,
block_dimensions
);
incremeted_type final_incremented;
cuda::memory::copy_single(&final_incremented, device_side_increment_target_raw);
CHECK_MESSAGE(final_incremented == expected_num_active_threads,
"The increment target, originally 0, was not incremented by 1 for every thread in the test grid");
std::vector<incremeted_type> active_thread_values;
std::copy_if(thread_values_before_increment.cbegin(), thread_values_before_increment.cend(),
std::back_inserter(active_thread_values), [](incremeted_type x) { return x != invalid_value_for_inactives; });
std::sort(active_thread_values.begin(), active_thread_values.end());
for(size_t i = 0; i < active_thread_values.size(); i++) {
CHECK_MESSAGE(i == active_thread_values[i],
"Not every intermediate value between 0 and final incremented value appears as some thread's pre-increment value");
}
}
} // TEST_SUITE("warp-level - general grid")
TEST_SUITE("warp-level - linear grid") {
TEST_CASE("at_warp_stride")
{
using checked_value_type = uint32_t;
cuda::grid::dimension_t num_grid_blocks { 1 };
cuda::grid::block_dimension_t num_threads_per_block { warp_size * 3 }; // Meeting full warp constraint
auto num_grid_warps = num_grid_blocks * num_threads_per_block / warp_size;
size_t length_to_cover_per_warp { 71 };
// We don't actually create input data, we just need each element in the
// range 0 ... length_to_cover-1 to be attended to.
//
// In this test case - it's 0 ... length_to_cover-1 attended to by _each_ of the warps.
auto num_values_to_populate = length_to_cover_per_warp * num_grid_warps;
auto testcase_device_function =
[length_to_cover_per_warp] KAT_DEV (
size_t num_grid_threads,
checked_value_type* pos_attendent_thread_indices
)
{
namespace gi = kat::linear_grid::grid_info;
auto offset_into_attendant_array = length_to_cover_per_warp * gi::warp::id_in_block();
auto f_inner = [&] (size_t pos) {
pos_attendent_thread_indices[offset_into_attendant_array + pos] = gi::thread::id_in_grid();
};
klcw::at_warp_stride(length_to_cover_per_warp, f_inner);
};
auto expected_value_retriever = [=] (size_t pos) {
// Which thread processes position pos?
auto intra_warp_pos = pos % length_to_cover_per_warp;
auto processing_warp_index = pos / length_to_cover_per_warp;
auto processing_lane_index = intra_warp_pos % warp_size;
return checked_value_type(processing_lane_index + processing_warp_index * warp_size);
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate, num_grid_blocks, num_threads_per_block,
make_exact_comparison<checked_value_type>
);
}
TEST_CASE("multisearch") {
/* using thread_value_type = int;
using checked_value_type = kat::linear_grid::collaborative::warp::search_result_t<thread_value_type>;
static_assert(std::is_trivially_constructible<checked_value_type>::value,
"search results are supposed to be trivially-constructible!");
cuda::grid::dimensions_t grid_dimensions { 1 };
cuda::grid::block_dimensions_t block_dimensions { warp_size * 7 };
auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume();
auto num_values_to_populate = num_total_threads;
auto make_thread_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
switch (warp_id) {
case 0: return thread_value_type(-2);
case 1: return thread_value_type(2);
case 2: return thread_value_type(lane_id < 5 ? -2 : 2);
case 3: return thread_value_type(lane_id < 4 ? -2 : lane_id == 4 ? -1 : 2);
case 4: return thread_value_type(lane_id - warp_size/2);
case 5: return thread_value_type(lane_id == 0 ? -2 : 2);
case 6:
default: return thread_value_type(lane_id < warp_size - 1 ? -2 : 2);
}
};
auto make_search_value = [] KAT_HD (unsigned warp_id, unsigned lane_id)
{
return -2 + ((int) lane_id % 5);
};
auto testcase_device_function =
[=] KAT_DEV (
size_t,
checked_value_type* search_results
)
{
namespace gi = kat::grid_info;
auto haystack_straw = make_thread_value(gi::warp::id_in_block(), gi::lane::id());
thread_value_type needle_to_search_for = make_search_value(gi::warp::id_in_block(), gi::lane::id());
auto search_result = klcw::multisearch(needle_to_search_for, haystack_straw);
search_results[gi::thread::global_id()] = search_result;
};
auto expected_value_retriever = [=] (size_t global_thread_id) -> checked_value_type {
auto warp_id { (global_thread_id % block_dimensions.volume()) / warp_size };
auto lane_id { global_thread_id % warp_size };
auto needle = make_search_value(warp_id, lane_id);
thread_value_type warp_values[warp_size];
// checked_value_type warp_search_results[warp_size];
for(unsigned lane_id = 0; lane_id < warp_size; lane_id++) {
warp_values[lane_id] = make_thread_value(warp_id, lane_id);
}
auto ptr = std::find_if(warp_values, warp_values + warp_size,
[=](thread_value_type x) { return x <= needle; });
return {
unsigned(ptr - warp_values),
(ptr - warp_values == warp_size) ? 0 : *ptr
};
// return expected_search_result;
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
num_values_to_populate,
grid_dimensions,
block_dimensions,
make_exact_comparison<checked_value_type>
);*/
}
TEST_CASE_TEMPLATE ("compute_predicate_at_warp_stride", SlackSetting,
std::integral_constant<klcw::detail::predicate_computation_length_slack_t, klcw::detail::predicate_computation_length_slack_t::may_have_full_warps_of_slack>,
std::integral_constant<klcw::detail::predicate_computation_length_slack_t, klcw::detail::predicate_computation_length_slack_t::has_no_slack>,
std::integral_constant<klcw::detail::predicate_computation_length_slack_t, klcw::detail::predicate_computation_length_slack_t::may_have_arbitrary_slack>
)
{
using checked_value_type = uint32_t; // but as a bit container
cuda::grid::dimension_t num_grid_blocks { 1 };
cuda::grid::block_dimension_t num_threads_per_block { warp_size * 3 }; // Meeting full warp constraint
auto num_grid_warps = num_grid_blocks * num_threads_per_block / warp_size;
size_t length_to_cover_per_warp;
switch (SlackSetting::value) {
case klcw::detail::predicate_computation_length_slack_t::has_no_slack:
length_to_cover_per_warp = warp_size * warp_size * 3;
break;
case klcw::detail::predicate_computation_length_slack_t::may_have_full_warps_of_slack:
length_to_cover_per_warp = warp_size * 3;
break;
case klcw::detail::predicate_computation_length_slack_t::may_have_arbitrary_slack:
default:
length_to_cover_per_warp = warp_size + 1;
break;
}
auto num_bit_containers_per_warp = ::div_rounding_up<uint32_t>(length_to_cover_per_warp, warp_size);
auto num_bit_containers = num_bit_containers_per_warp * num_grid_warps;
// Note that we populate a slack for every one of the warps' stretches
auto pos_attendants_length = length_to_cover_per_warp * num_grid_warps;
cuda::device_t device { cuda::device::current::get() };
auto device_side_pos_attendants { cuda::memory::device::make_unique<uint32_t[]>(device, pos_attendants_length) };
uint32_t* device_side_pos_attendants_raw = device_side_pos_attendants.get();
cuda::memory::zero(device_side_pos_attendants_raw, sizeof(uint32_t) * pos_attendants_length);
auto pred = [] KAT_HD (uint32_t intra_warp_stretch_pos) -> bool { return (intra_warp_stretch_pos % 7 == 0); };
auto testcase_device_function =
[=] KAT_DEV (
size_t num_grid_threads,
checked_value_type* computed_predicate
)
{
namespace gi = kat::linear_grid::grid_info;
auto instrumented_pred = [=] (size_t pos) -> bool {
device_side_pos_attendants_raw[gi::warp::global_id() * length_to_cover_per_warp + pos] = gi::thread::id_in_grid();
return pred(pos);
};
auto computed_predicate_for_this_warp = computed_predicate + num_bit_containers_per_warp * gi::warp::global_id();
klcw::compute_predicate_at_warp_stride(computed_predicate_for_this_warp, instrumented_pred, length_to_cover_per_warp);
};
auto expected_bit_container_retriever = [=] (uint32_t bit_container_pos) -> uint32_t {
auto intra_warp_stretch_bit_container_pos = bit_container_pos % num_bit_containers_per_warp;
checked_value_type bit_container { 0 };
for(auto lane_id = 0; lane_id < warp_size; lane_id++) {
auto intra_warp_pos = intra_warp_stretch_bit_container_pos * warp_size + lane_id;
if (intra_warp_pos < length_to_cover_per_warp) {
bit_container |= ( pred(intra_warp_pos) << lane_id );
}
}
return bit_container;
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_bit_container_retriever,
num_bit_containers, num_grid_blocks, num_threads_per_block,
make_exact_comparison<checked_value_type>
);
std::vector<uint32_t> pos_attendants(pos_attendants_length);
cuda::memory::copy(pos_attendants.data(), device_side_pos_attendants_raw, sizeof(uint32_t) * pos_attendants_length);
device.synchronize();
auto expected_attendant_retriever = [=] (uint32_t pos) -> uint32_t {
auto attending_warp = pos / length_to_cover_per_warp;
auto attending_lane = (pos % length_to_cover_per_warp) % warp_size;
return attending_warp * warp_size + attending_lane;
};
auto check_title = std::string("which thread attended which position in testcase ") + doctest::current_test_name();
check_results(
check_title,
pos_attendants_length,
pos_attendants.data(),
expected_attendant_retriever,
make_exact_comparison<uint32_t>);
}
/*
TEST_CASE("merge_sorted_half_warps - in-register")
{
using lane_value_type = int;
using checked_value_type = lane_value_type;
std::vector<lane_value_type> half_warps {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 31,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
};
constexpr const auto half_warp_size = warp_size / 2;
auto num_half_warps = half_warps.size() / half_warp_size;
cuda::device_t device { cuda::device::current::get() };
auto num_threads_per_block { warp_size *
std::min<cuda::grid::block_dimension_t>(device.properties().max_warps_per_block(), num_half_warps / 2) }; // Meeting full warp constraint
cuda::grid::dimension_t num_grid_blocks { std::max<cuda::grid::dimension_t>(1, (warp_size * num_half_warps) / num_threads_per_block) };
auto total_num_threads = num_threads_per_block * num_grid_blocks;
auto device_side_half_warps { cuda::memory::device::make_unique<lane_value_type[]>(device, half_warps.size()) };
auto device_side_half_warps_raw = device_side_half_warps.get();
cuda::memory::copy(device_side_half_warps_raw, half_warps.data(), sizeof(lane_value_type) * half_warps.size() );
auto testcase_device_function =
[=] KAT_DEV (
size_t,
checked_value_type* merged_data
)
{
namespace gi = kat::linear_grid::grid_info;
auto half_warp_pair_index = gi::warp::global_id(); // Each warp gets a different pair of half-warps to merge
auto first_half_warp_index = half_warp_pair_index / num_half_warps;
auto second_half_warp_index = half_warp_pair_index % num_half_warps;
auto my_half_warp_index = gi::lane::is_in_first_half_warp() ?
first_half_warp_index : second_half_warp_index;
lane_value_type lane_value = device_side_half_warps_raw[my_half_warp_index * half_warp_size + gi::lane::id()];
auto post_merge_value = klcw::merge_sorted_half_warps(lane_value);
// printf("Thread %3u had %2d now has %2d\n", gi::thread::global_id(), lane_value, post_merge_value);
merged_data[gi::thread::global_id()] = post_merge_value;
};
auto expected_value_retriever = [=] (size_t pos) {
auto half_warp_pair_index = pos / warp_size;
auto first_half_warp_index = half_warp_pair_index / num_half_warps;
auto first_half_warp_begin = half_warps.cbegin() + first_half_warp_index * half_warp_size;
auto second_half_warp_index = half_warp_pair_index % num_half_warps;
auto second_half_warp_begin = half_warps.cbegin() + second_half_warp_index * half_warp_size;
std::array<lane_value_type, warp_size> merged;
std::merge(
first_half_warp_begin, first_half_warp_begin + half_warp_size,
second_half_warp_begin, second_half_warp_begin + half_warp_size,
merged.begin());
auto lane_index = pos % warp_size;
return merged[lane_index];
};
execute_non_uniform_testcase_on_gpu_and_check(
testcase_device_function,
expected_value_retriever,
total_num_threads,
num_grid_blocks, num_threads_per_block,
make_exact_comparison<checked_value_type>
);
}
*/
} // TEST_SUITE("warp-level - linear grid")
//#include <cstdio>
#include "caffe/fast_rcnn_layers.hpp"
#include "stdio.h"
using std::max;
using std::min;
namespace caffe {
// DexX / DexY: bilinearly interpolate the x / y coordinate of pooling-grid point (i, j) from the ROI's four corner points
template <typename Dtype>
__device__ inline float DexX(const Dtype* bottom_rois, int i_int, int j_int, const int pooled_height_int, const int pooled_width_int) {
Dtype i = float(i_int);
Dtype j = float(j_int);
Dtype pooled_width = float(pooled_width_int);
Dtype pooled_height = float(pooled_height_int);
return (pooled_height - i) / pooled_height * (
(pooled_width - j) / pooled_width * bottom_rois[1] + j / pooled_width * bottom_rois[3]) + i / pooled_height * (
(pooled_width - j) / pooled_width * bottom_rois[7] + j / pooled_width * bottom_rois[5]);
}
template <typename Dtype>
__device__ inline float DexY(const Dtype* bottom_rois, int i_int, int j_int, const int pooled_height_int, const int pooled_width_int) {
Dtype i = float(i_int);
Dtype j = float(j_int);
Dtype pooled_width = float(pooled_width_int);
Dtype pooled_height = float(pooled_height_int);
return (pooled_width - j) / pooled_width * (
(pooled_height - i) / pooled_height * bottom_rois[2] + i / pooled_height * bottom_rois[8]) + j / pooled_width * (
(pooled_height - i) / pooled_height * bottom_rois[4] + i / pooled_height * bottom_rois[6]);
}
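// The two helpers above bilinearly interpolate the feature-map position of pooling-grid
// point (i, j). Assuming each ROI row is laid out as
//   [batch_index, x_lt, y_lt, x_rt, y_rt, x_rb, y_rb, x_lb, y_lb]
// (consistent with the indices used above and the "order of lt, rt, rb, lb" note in the
// kernel below), then with H = pooled_height and W = pooled_width:
//   X(i, j) = (1 - i/H) * [ (1 - j/W) * x_lt + (j/W) * x_rt ]
//           +      i/H  * [ (1 - j/W) * x_lb + (j/W) * x_rb ]
// and Y(i, j) is the analogous expression over the corners' y coordinates.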
template <typename Dtype>
__device__ inline Dtype cross_mul(Dtype *pt1,Dtype * pt2,Dtype *pt3){
return pt2[0]*pt3[1]+pt3[0]*pt1[1]+pt1[0]*pt2[1]-pt2[0]*pt1[1]-pt3[0]*pt2[1]-pt1[0]*pt3[1];
}
template <typename Dtype>
__device__ inline bool inpoly(Dtype pt_x, Dtype pt_y, Dtype * pts) {
bool flag = true;
int cur_sign;
Dtype pt[2];
pt[0] = pt_x;
pt[1] = pt_y;
int sign;
for(int i = 0 ;i<4;i++){
Dtype val = cross_mul(pts+i*2,pts+((i+1)%4*2),pt);
if(val<0.0f){
cur_sign = -1;
}else if(val>0.0f){
cur_sign = 1;
}else{
cur_sign =0;
}
if(cur_sign !=0){
if(flag){
flag = false;
sign = cur_sign;
}else{
if(sign!=cur_sign) return false;
}
}
}
return true;
}
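// inpoly() above is a convex-quadrilateral containment test: cross_mul() returns twice the
// signed area of the triangle (pt1, pt2, pt3), so the query point lies inside (or on an
// edge of) the quad exactly when all non-zero signs around the four edges agree.
// Illustrative example (values are hypothetical; the function itself is __device__-only):
//   Dtype unit_square[8] = { 0,0, 1,0, 1,1, 0,1 };
//   inpoly(Dtype(0.5), Dtype(0.5), unit_square); // -> true  (all cross products positive)
//   inpoly(Dtype(2.0), Dtype(0.5), unit_square); // -> false (signs disagree)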
template <typename Dtype>
__global__ void RotateROIAlignForward(const int nthreads, const Dtype* bottom_data,
const Dtype or_spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, Dtype* con_idx_x, Dtype* con_idx_y,const Dtype* info) {
// The effective spatial scale depends on the actual feature-map-to-image size ratio
Dtype im_height = info[0];
Dtype im_width = info[1];
Dtype spatial_scale_h = float(height) / im_height;
Dtype spatial_scale_w = float(width) / im_width;
//Dtype spatial_scale = (spatial_scale_w + spatial_scale_h) / 2.0;
int imageWidth = int(info[1]*spatial_scale_w+0.5);
int imageHeight = int(info[0]*spatial_scale_h+0.5);
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// Each input box is given as 4 points (8 coordinates), preceded by its batch index: 9 values per ROI
bottom_rois += n * 9;
int roi_batch_ind = bottom_rois[0];
//Dtype cx = bottom_rois[1];
//Dtype cy = bottom_rois[2];
//Dtype h = bottom_rois[3];
//Dtype w = bottom_rois[4];
//Dtype angle = bottom_rois[5]/180.0*3.1415926535;
//TransformPrepare
//Dtype dx = -pooled_width/2.0;
//Dtype dy = -pooled_height/2.0;
//Dtype Sx = w*spatial_scale/pooled_width;
//Dtype Sy = h*spatial_scale/pooled_height;
//Dtype Alpha = cos(angle);
//Dtype Beta = sin(angle);
//Dtype Dx = cx*spatial_scale;
//Dtype Dy = cy*spatial_scale;
//Dtype M[2][3];
//M[0][0] = Alpha*Sx;
//M[0][1] = Beta*Sy;
//M[0][2] = Alpha*Sx*dx+Beta*Sy*dy+Dx;
//M[1][0] = -Beta*Sx;
//M[1][1] = Alpha*Sy;
//M[1][2] = -Beta*Sx*dx+Alpha*Sy*dy+Dy;
// order of lt, rt, rb, lb
Dtype P[8];
P[0] = DexX(bottom_rois, ph, pw, pooled_height, pooled_width) * spatial_scale_w;
P[1] = DexY(bottom_rois, ph, pw, pooled_height, pooled_width) * spatial_scale_h;
P[2] = DexX(bottom_rois, ph, pw + 1, pooled_height, pooled_width) * spatial_scale_w;
P[3] = DexY(bottom_rois, ph, pw + 1, pooled_height, pooled_width) * spatial_scale_h;
P[4] = DexX(bottom_rois, ph + 1, pw + 1, pooled_height, pooled_width) * spatial_scale_w;
P[5] = DexY(bottom_rois, ph + 1, pw + 1, pooled_height, pooled_width) * spatial_scale_h;
P[6] = DexX(bottom_rois, ph + 1, pw, pooled_height, pooled_width) * spatial_scale_w;
P[7] = DexY(bottom_rois, ph + 1, pw, pooled_height, pooled_width) * spatial_scale_h;
//int leftMost = int(max(round(min(min(P[0],P[2]),min(P[4],P[6]))),0.0));
//int rightMost= int(min(round(max(max(P[0],P[2]),max(P[4],P[6]))),imageWidth-1.0));
//int topMost= int(max(round(min(min(P[1],P[3]),min(P[5],P[7]))),0.0));
//int bottomMost= int(min(round(max(max(P[1],P[3]),max(P[5],P[7]))),imageHeight-1.0));
// Exact position on feature map in type float
Dtype leftMost = fmax(fmin(fmin(P[0],P[2]),fmin(P[4],P[6])),0.0);
Dtype rightMost = fmin(fmax(fmax(P[0],P[2]),fmax(P[4],P[6])),imageWidth-1.0);
Dtype topMost = fmax(fmin(fmin(P[1],P[3]),fmin(P[5],P[7])),0.0);
Dtype bottomMost = fmin(fmax(fmax(P[1],P[3]),fmax(P[5],P[7])),imageHeight-1.0);
float maxval = 0.0;
float max_con_x = -1.0;
float max_con_y = -1.0;
bottom_data += (roi_batch_ind * channels + c) * height * width;
//Dtype AB[2];
//AB[0] = P[2] - P[0];
//AB[1] = P[3] - P[1];
//Dtype ABAB = AB[0]*AB[0] + AB[1]*AB[1];
//Dtype AC[2];
//AC[0] = P[4] - P[0];
//AC[1] = P[5] - P[1];
//Dtype ACAC = AC[0]*AC[0] + AC[1]*AC[1];
Dtype h = topMost;
while (h < bottomMost+1) {
Dtype w = leftMost;
while (w < rightMost+1) {
if(inpoly(w, h, P)){
//Perform bilinear interpolation at the sample point (w, h)
int bin_xs = int(floor(w));
int bin_ys = int(floor(h));
float rx = w - floor(w);
float ry = h - floor(h);
float wlt = (1.0 - rx) * (1.0 - ry);
float wrt = rx * (1.0 - ry);
float wrb = rx * ry;
float wlb = (1.0 - rx) * ry;
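// Bilinear interpolation: with rx, ry the fractional offsets inside the cell,
// inter_val = (1-rx)(1-ry)*v_lt + rx*(1-ry)*v_rt + rx*ry*v_rb + (1-rx)*ry*v_lb,
// where v_lt..v_lb are the four surrounding (clamped) feature-map values read below.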
float inter_val = 0.0;
int min_x = min(max(bin_xs, 0), width - 1);
int min_y = min(max(bin_ys, 0), height - 1);
int max_x = max(min(bin_xs + 1, width - 1), 0);
int max_y = max(min(bin_ys + 1, height - 1), 0);
int lt = min_y * width + min_x;
int rt = min_y * width + max_x;
int rb = max_y * width + max_x;
int lb = max_y * width + min_x;
inter_val += bottom_data[lt] * wlt;
inter_val += bottom_data[rt] * wrt;
inter_val += bottom_data[rb] * wrb;
inter_val += bottom_data[lb] * wlb;
//inter_val = bottom_data[bin_ys * width + bin_xs];
if (inter_val > maxval) {
maxval = inter_val;
max_con_x = w;
max_con_y = h;
}
}
w = w + 1.0;
}
h = h + 1.0;
}
top_data[index] = maxval;
con_idx_x[index] = max_con_x;
con_idx_y[index] = max_con_y;
}
}
template <typename Dtype>
void RotateROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* cpu_bottom_data = bottom[0]->cpu_data();
std::cout<<bottom[0]->count()<<std::endl;
std::cout<<bottom[1]->count()<<std::endl;
std::cout<<bottom[2]->count()<<std::endl;
Dtype* top_data = top[0]->mutable_gpu_data();
//int* argmax_data = max_idx_.mutable_gpu_data();
Dtype* con_idx_x = continuous_idx_x.mutable_gpu_data();
Dtype* con_idx_y = continuous_idx_y.mutable_gpu_data();
std::cout<<"cpu_bottom_data"<<std::endl;
std::cout<<cpu_bottom_data[1]<<std::endl;
const Dtype* image_info = bottom[2]->gpu_data();
const Dtype* cpu_image_info = bottom[2]->cpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
std::cout<<"spatial_scale_"<<std::endl;
std::cout<<spatial_scale_<<std::endl;
std::cout<<(height_ / cpu_image_info[0])<<std::endl;
std::cout<<count<<std::endl;
std::cout<<bottom[0]->count()<<std::endl;
std::cout<<bottom[1]->count()<<std::endl;
std::cout<<bottom[2]->count()<<std::endl;
RotateROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, con_idx_x, con_idx_y, image_info);
CUDA_POST_KERNEL_CHECK;
//const Dtype* top_gpu_data = top[0]->gpu_data();
//std::cout<<top_gpu_data[0]<<std::endl;
}
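// Blob layout assumed by the kernels in this file (inferred from their indexing):
//   bottom[0]: feature map of shape (N, C, height, width)
//   bottom[1]: ROIs of shape (R, 9) = [batch_index, x1, y1, x2, y2, x3, y3, x4, y4]
//   bottom[2]: image info, with info[0] = image height and info[1] = image width
//   top[0]:    pooled output of shape (R, C, pooled_height, pooled_width)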
template <typename Dtype>
__global__ void RotateROIAlignBackward(const int nthreads, const Dtype* top_diff,
const Dtype* con_idx_x, const Dtype* con_idx_y, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* backbone_diff, Dtype* proposal_diff,
const Dtype* bottom_data, const Dtype* bottom_rois, const Dtype* info) {
CUDA_KERNEL_LOOP(index, nthreads) {
//backbone_diff is the original bottom_diff; argmax_data is decomposed into two arrays of continuous coordinates (con_idx_x, con_idx_y)
//A second branch backpropagates the gradient to the ROI proposal coordinates
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
Dtype im_height = info[0];
Dtype im_width = info[1];
Dtype spatial_scale_h = float(height) / im_height;
Dtype spatial_scale_w = float(width) / im_width;
//Take an offset
bottom_rois += n * 9;
proposal_diff += n * 9;
int roi_batch_ind = bottom_rois[0];
backbone_diff += (roi_batch_ind * channels + c) * height * width;
bottom_data += (roi_batch_ind * channels + c) * height * width;
//////////////////// backbone branch //////////////////////
//Backprop through the bilinear interpolation
Dtype w = con_idx_x[index];
Dtype h = con_idx_y[index];
int bin_xs = int(floor(w));
int bin_ys = int(floor(h));
Dtype rx = w - float(bin_xs);
Dtype ry = h - float(bin_ys);
Dtype wlt = (1.0 - rx) * (1.0 - ry);
Dtype wrt = rx * (1.0 - ry);
Dtype wrb = rx * ry;
Dtype wlb = (1.0 - rx) * ry;
//Dtype inter_val = 0;
int min_x = min(max(bin_xs, 0), width - 1);
int min_y = min(max(bin_ys, 0), height - 1);
int max_x = max(min(bin_xs + 1, width - 1), 0);
int max_y = max(min(bin_ys + 1, height - 1), 0);
//if(bin_xs >= 0 && bin_ys >= 0) {
backbone_diff[min_y * width + min_x] += wlt * top_diff[index];
//}
//if(bin_xs + 1 < width && bin_ys >= 0) {
backbone_diff[min_y * width + max_x] += wrt * top_diff[index];
//}
//if(bin_xs + 1 < width && bin_ys + 1 < height) {
backbone_diff[max_y * width + max_x] += wrb * top_diff[index];
//}
//if(bin_xs >= 0 && bin_ys + 1 < height) {
backbone_diff[max_y * width + min_x] += wlb * top_diff[index];
//}
///////////////////////////////////////////////////////////////////////
///////////////////////// proposal branch ///////////////////////////
// pick the values from the feature map when the coordinates are inside the boundaries
//Dtype val_lt = (bin_xs >= 0 && bin_ys >= 0) ? bottom_data[bin_ys * width + bin_xs] : 0.0;
//Dtype val_rt = (bin_xs + 1 < width && bin_ys >= 0) ? bottom_data[bin_ys * width + (bin_xs + 1)] : 0.0;
//Dtype val_rb = (bin_xs + 1 < width && bin_ys + 1 < height) ? bottom_data[(bin_ys + 1) * width + (bin_xs + 1)] : 0.0;
//Dtype val_lb = (bin_xs >= 0 && bin_ys + 1 < height) ? bottom_data[(bin_ys + 1) * width + bin_xs] : 0.0;
Dtype val_lt = bottom_data[min_y * width + min_x];
Dtype val_rt = bottom_data[min_y * width + max_x];
Dtype val_rb = bottom_data[max_y * width + max_x];
Dtype val_lb = bottom_data[max_y * width + min_x];
// Compute the loss of h & w on pts of bilinear interpolation
Dtype d_wlt_w = -(1.0 - h + bin_ys);
Dtype d_wlt_h = -(1.0 - w + bin_xs);
Dtype d_wrt_w = (1.0 - h + bin_ys);
Dtype d_wrt_h = -(w - bin_xs);
Dtype d_wrb_w = (h - bin_ys);
Dtype d_wrb_h = (w - bin_xs);
Dtype d_wlb_w = -(h - bin_ys);
Dtype d_wlb_h = (1 - w + bin_xs);
Dtype dw = d_wlt_w * val_lt + d_wrt_w * val_rt + d_wrb_w * val_rb + d_wlb_w * val_lb;
Dtype dh = d_wlt_h * val_lt + d_wrt_h * val_rt + d_wrb_h * val_rb + d_wlb_h * val_lb;
Dtype loss_w = dw * top_diff[index];
Dtype loss_h = dh * top_diff[index];
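// Chain rule: dw = d(inter_val)/dw and dh = d(inter_val)/dh are obtained by
// differentiating the bilinear weights w.r.t. the sample position (w, h);
// multiplying by top_diff[index] yields the gradient of the loss w.r.t. (w, h).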
// order of lt, rt, rb, lb
Dtype P[8];
P[0] = DexX(bottom_rois, ph, pw, pooled_height, pooled_width) * spatial_scale_w;
P[1] = DexY(bottom_rois, ph, pw, pooled_height, pooled_width) * spatial_scale_h;
P[2] = DexX(bottom_rois, ph, pw + 1, pooled_height, pooled_width) * spatial_scale_w;
P[3] = DexY(bottom_rois, ph, pw + 1, pooled_height, pooled_width) * spatial_scale_h;
P[4] = DexX(bottom_rois, ph + 1, pw + 1, pooled_height, pooled_width) * spatial_scale_w;
P[5] = DexY(bottom_rois, ph + 1, pw + 1, pooled_height, pooled_width) * spatial_scale_h;
P[6] = DexX(bottom_rois, ph + 1, pw, pooled_height, pooled_width) * spatial_scale_w;
P[7] = DexY(bottom_rois, ph + 1, pw, pooled_height, pooled_width) * spatial_scale_h;
Dtype loss_P[8];
//backprop to the pts of pooling bin
loss_P[0] = (P[0] >= 0.0 && P[0] < P[2] && P[0] < P[4] && P[0] < P[6]) ? loss_w : 0.0;
loss_P[1] = (P[1] >= 0.0 && P[1] < P[3] && P[1] < P[5] && P[1] < P[7]) ? loss_h : 0.0;
loss_P[2] = (P[2] >= 0.0 && P[2] < P[0] && P[2] < P[4] && P[2] < P[6]) ? loss_w : 0.0;
loss_P[3] = (P[3] >= 0.0 && P[3] < P[1] && P[3] < P[5] && P[3] < P[7]) ? loss_h : 0.0;
loss_P[4] = (P[4] >= 0.0 && P[4] < P[0] && P[4] < P[2] && P[4] < P[6]) ? loss_w : 0.0;
loss_P[5] = (P[5] >= 0.0 && P[5] < P[1] && P[5] < P[3] && P[5] < P[7]) ? loss_h : 0.0;
loss_P[6] = (P[6] >= 0.0 && P[6] < P[0] && P[6] < P[2] && P[6] < P[4]) ? loss_w : 0.0;
loss_P[7] = (P[7] >= 0.0 && P[7] < P[1] && P[7] < P[3] && P[7] < P[5]) ? loss_h : 0.0;
int trigger_w = 0;
int trigger_h = 0;
//determine whether the right edge (x) and/or bottom edge (y) of the bin contributed the gradient
for(int i = 0;i < 4;i++) {
if(fabs(loss_P[i*2]) > 0.0) {
if(i*2 == 2 || i*2 == 4) {
trigger_w = 1;
}
}
if(fabs(loss_P[i*2+1]) > 0.0) {
if(i*2+1 == 5 || i*2+1 == 7) {
trigger_h = 1;
}
}
}
int h_idx = (trigger_h == 1) ? ph + 1 : ph;
int w_idx = (trigger_w == 1) ? pw + 1 : pw;
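// Distribute loss_w / loss_h onto the 8 ROI corner coordinates using the same
// bilinear weights that DexX / DexY apply in the forward pass: the sample point
// at bin corner (h_idx, w_idx) is a weighted combination of the four ROI corners,
// so each corner coordinate receives the corresponding fraction of the gradient.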
proposal_diff[1] += (float(pooled_height - h_idx) / pooled_height) * (float(pooled_width - w_idx) / pooled_width) * loss_w;
proposal_diff[2] += (float(pooled_width - w_idx) / pooled_width) * (float(pooled_height - h_idx) / pooled_height) * loss_h;
proposal_diff[3] += (float(pooled_height - h_idx) / pooled_height) * (float(w_idx) / pooled_width) * loss_w;
proposal_diff[4] += (float(w_idx) / pooled_width) * (float(pooled_height - h_idx) / pooled_height) * loss_h;
proposal_diff[5] += (float(h_idx) / pooled_height) * (float(w_idx) / pooled_width) * loss_w;
proposal_diff[6] += (float(w_idx) / pooled_width) * (float(h_idx) / pooled_height) * loss_h;
proposal_diff[7] += (float(h_idx) / pooled_height) * (float(pooled_width - w_idx) / pooled_width) * loss_w;
proposal_diff[8] += (float(pooled_width - w_idx) / pooled_width) * (float(h_idx) / pooled_height) * loss_h;
///////////////////////////////////////////////////////////////////////
//int bottom_index = argmax_data[index];
//if(bottom_index!=-1)
//backbone_diff[bottom_index]+=top_diff[index];
/**/
}
}
template <typename Dtype>
void RotateROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
//std::cout<<top_diff[0]<<std::endl;
Dtype* backbone_diff = bottom[0]->mutable_gpu_diff();
Dtype* proposal_diff = bottom[1]->mutable_gpu_diff();
//const int count = bottom[0]->count();
const int backbone_count = bottom[0]->count();
const int proposal_count = bottom[1]->count();
//caffe_gpu_set(count, Dtype(0.), backbone_diff);
caffe_gpu_set(backbone_count, Dtype(0.), backbone_diff);
caffe_gpu_set(proposal_count, Dtype(0.), proposal_diff);
//const int* argmax_data = max_idx_.gpu_data();
const Dtype* con_idx_x = continuous_idx_x.gpu_data();
const Dtype* con_idx_y = continuous_idx_y.gpu_data();
const Dtype* cpu_con_idx_x = continuous_idx_x.cpu_data();
const Dtype* cpu_con_idx_y = continuous_idx_y.cpu_data();
std::cout<<cpu_con_idx_x[0]<<std::endl;
std::cout<<cpu_con_idx_x[1]<<std::endl;
const Dtype* image_info = bottom[2]->gpu_data();
int counter = top[0]->count();
std::cout<<counter<<std::endl;
//NOLINT_NEXT_LINE(whitespace/operators)
RotateROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(counter), CAFFE_CUDA_NUM_THREADS>>>(
counter, top_diff, con_idx_x, con_idx_y, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, backbone_diff, proposal_diff, bottom_data, bottom_rois, image_info);
CUDA_POST_KERNEL_CHECK;
//const Dtype* cpu_arr = top[0]->cpu_diff();
std::cout<<"Backprop down"<<std::endl;
}
INSTANTIATE_LAYER_GPU_FUNCS(RotateROIAlignLayer);
} // namespace caffe
|
the_stack
|
#include <claraparabricks/genomeworks/cudaaligner/aligner.hpp>
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp>
#include <claraparabricks/genomeworks/utils/limits.cuh>
#include <claraparabricks/genomeworks/utils/mathutils.hpp>
#include <claraparabricks/genomeworks/utils/cudautils.hpp>
#include <claraparabricks/genomeworks/utils/allocator.hpp>
#include <claraparabricks/genomeworks/utils/device_buffer.hpp>
#include <cassert>
#include <climits>
#include <vector>
#include <numeric>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include <cuda/atomic>
#pragma GCC diagnostic pop
#include <cub/device/device_radix_sort.cuh>
#include <cub/util_type.cuh>
namespace claraparabricks
{
namespace genomeworks
{
namespace cudaaligner
{
constexpr int32_t warp_size = 32;
constexpr int32_t word_size = sizeof(myers::WordType) * CHAR_BIT;
namespace myers
{
constexpr int32_t initial_distance_guess_factor = 20;
__global__ void set_minus_one_to_zero(int32_t* array, int32_t n)
{
const int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
if (array[i] < 0)
array[i] = 0;
}
}
__global__ void init_atomics(cuda::atomic<int32_t, cuda::thread_scope_device>* path_start_atomic, cuda::atomic<int32_t, cuda::thread_scope_device>* scheduling_atomic, int32_t value)
{
// Safety-check for work-around for missing cuda::atomic_ref in libcu++ (see further below).
static_assert(sizeof(int32_t) == sizeof(cuda::atomic<int32_t, cuda::thread_scope_device>), "cuda::atomic<int32_t> needs to have the same size as int32_t.");
static_assert(alignof(int32_t) == alignof(cuda::atomic<int32_t, cuda::thread_scope_device>), "cuda::atomic<int32_t> needs to have the same alignment as int32_t.");
if (threadIdx.x == 0 && blockIdx.x == 0)
{
path_start_atomic->store(0, cuda::memory_order_relaxed);
scheduling_atomic->store(value, cuda::memory_order_relaxed);
}
}
inline __device__ WordType warp_leftshift_sync(uint32_t warp_mask, WordType v)
{
assert(((warp_mask >> (threadIdx.x % warp_size)) & 1u) == 1u);
// 4 threads, word_size = 4 example: thread 0 | thread 1 | thread 2 | thread 3
// v = 0101 | 0111 | 0011 | 1101 -> 1010 | 1110 | 0111 | 1010
const WordType x = __shfl_up_sync(warp_mask, v >> (word_size - 1), 1);
assert((x & ~WordType(1)) == 0);
v <<= 1;
if (threadIdx.x != 0)
v |= x;
return v;
}
inline __device__ WordType warp_rightshift_sync(uint32_t warp_mask, WordType v)
{
assert(((warp_mask >> (threadIdx.x % warp_size)) & 1u) == 1u);
// 4 threads, word_size = 4 example: thread 0 | thread 1 | thread 2 | thread 3
// v = 0101 | 0111 | 0011 | 1101 -> 0010 | 1011 | 1001 | 1110
const WordType x = __shfl_down_sync(warp_mask, v << (word_size - 1), 1);
assert((x & ~(WordType(1) << (word_size - 1))) == 0);
v >>= 1;
if ((warp_mask >> threadIdx.x) > 1u)
v |= x;
return v;
}
inline __device__ WordType warp_add_sync(uint32_t warp_mask, WordType a, WordType b)
{
static_assert(sizeof(WordType) == 4, "This function assumes WordType to have 4 bytes.");
static_assert(CHAR_BIT == 8, "This function assumes a char width of 8 bit.");
assert(((warp_mask >> (threadIdx.x % warp_size)) & 1u) == 1u);
const uint64_t ax = a;
const uint64_t bx = b;
uint64_t r = ax + bx;
uint32_t carry = static_cast<uint32_t>(r >> 32);
if (warp_mask == 1u)
{
return static_cast<WordType>(r);
}
r &= 0xffff'ffffull;
// TODO: I think due to the structure of the Myers blocks,
// a carry cannot propagate over more than a single block.
// I.e. a single carry propagation without the loop should be sufficient.
while (__any_sync(warp_mask, carry))
{
uint32_t x = __shfl_up_sync(warp_mask, carry, 1);
if (threadIdx.x != 0)
r += x;
carry = static_cast<uint32_t>(r >> 32);
r &= 0xffff'ffffull;
}
return static_cast<WordType>(r);
}
__device__ int32_t myers_advance_block(uint32_t warp_mask, WordType highest_bit, WordType eq, WordType& pv, WordType& mv, int32_t carry_in)
{
assert((pv & mv) == WordType(0));
// Stage 1
WordType xv = eq | mv;
if (carry_in < 0)
eq |= WordType(1);
WordType xh = warp_add_sync(warp_mask, eq & pv, pv);
xh = (xh ^ pv) | eq;
WordType ph = mv | (~(xh | pv));
WordType mh = pv & xh;
int32_t carry_out = ((ph & highest_bit) == WordType(0) ? 0 : 1) - ((mh & highest_bit) == WordType(0) ? 0 : 1);
ph = warp_leftshift_sync(warp_mask, ph);
mh = warp_leftshift_sync(warp_mask, mh);
if (carry_in < 0)
mh |= WordType(1);
if (carry_in > 0)
ph |= WordType(1);
// Stage 2
pv = mh | (~(xv | ph));
mv = ph & xv;
return carry_out;
}
__device__ int2 myers_advance_block2(uint32_t warp_mask, WordType highest_bit, WordType eq, WordType& pv, WordType& mv, int32_t carry_in)
{
assert((pv & mv) == WordType(0));
// Stage 1
WordType xv = eq | mv;
if (carry_in < 0)
eq |= WordType(1);
WordType xh = warp_add_sync(warp_mask, eq & pv, pv);
xh = (xh ^ pv) | eq;
WordType ph = mv | (~(xh | pv));
WordType mh = pv & xh;
int2 carry_out;
carry_out.x = ((ph & highest_bit) == WordType(0) ? 0 : 1) - ((mh & highest_bit) == WordType(0) ? 0 : 1);
carry_out.y = ((ph & (highest_bit << 1)) == WordType(0) ? 0 : 1) - ((mh & (highest_bit << 1)) == WordType(0) ? 0 : 1);
ph = warp_leftshift_sync(warp_mask, ph);
mh = warp_leftshift_sync(warp_mask, mh);
if (carry_in < 0)
mh |= WordType(1);
if (carry_in > 0)
ph |= WordType(1);
// Stage 2
pv = mh | (~(xv | ph));
mv = ph & xv;
return carry_out;
}
__device__ WordType myers_generate_query_pattern(char x, char const* query, int32_t query_size, int32_t offset)
{
// Sets a 1 bit at the position of every matching character
assert(offset < query_size);
const int32_t max_i = min(query_size - offset, word_size);
WordType r = 0;
for (int32_t i = 0; i < max_i; ++i)
{
if (x == query[i + offset])
r = r | (WordType(1) << i);
}
return r;
}
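// Example: for query = "ACCA" and offset = 0, myers_generate_query_pattern('C', query, 4, 0)
// returns a word with bits 1 and 2 set (0b0110), since the query matches 'C' at positions 1 and 2.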
inline __device__ WordType get_query_pattern(device_matrix_view<WordType>& query_patterns, int32_t idx, int32_t query_begin_offset, char x, bool reverse)
{
static_assert(std::is_unsigned<WordType>::value, "WordType has to be an unsigned type for well-defined >> operations.");
assert(x >= 0);
assert(x == 'A' || x == 'C' || x == 'G' || x == 'T');
const int32_t char_idx = (x >> 1) & 0x3u; // [A,C,T,G] -> [0,1,2,3]
// 4-bit word example:
// query_patterns contains character match bit patterns "XXXX" for the full query string.
// we want the bit pattern "yyyy" for a view on the query string starting at e.g. character 11:
// 4 3 2 1 0 (pattern index)
// XXXX XXXX XXXX [XXXX] [XXXX]
// YYY Yyyy y
// 1 0 (idx)
//
// query_begin_offset = 11
// => idx_offset = 11/4 = 2, shift = 11%4 = 3
const int32_t idx_offset = query_begin_offset / word_size;
const int32_t shift = query_begin_offset % word_size;
WordType r = query_patterns(idx + idx_offset, char_idx);
if (shift != 0)
{
r >>= shift;
if (idx + idx_offset + 1 < query_patterns.num_rows())
{
r |= query_patterns(idx + idx_offset + 1, char_idx) << (word_size - shift);
}
}
return r;
}
inline __device__ int32_t get_myers_score(int32_t i, int32_t j, device_matrix_view<WordType> const& pv, device_matrix_view<WordType> const& mv, device_matrix_view<int32_t> const& score, WordType last_entry_mask)
{
assert(i > 0); // row 0 is implicit, NW matrix is shifted by i -> i-1
const int32_t word_idx = (i - 1) / word_size;
const int32_t bit_idx = (i - 1) % word_size;
int32_t s = score(word_idx, j);
WordType mask = (~WordType(1)) << bit_idx;
if (word_idx == score.num_rows() - 1)
mask &= last_entry_mask;
s -= __popc(mask & pv(word_idx, j));
s += __popc(mask & mv(word_idx, j));
return s;
}
__device__ void myers_backtrace(int8_t* paths_base, int32_t* lengths, int32_t max_path_length, device_matrix_view<WordType> const& pv, device_matrix_view<WordType> const& mv, device_matrix_view<int32_t> const& score, int32_t query_size, int32_t id)
{
using nw_score_t = int32_t;
assert(pv.num_rows() == score.num_rows());
assert(mv.num_rows() == score.num_rows());
assert(pv.num_cols() == score.num_cols());
assert(mv.num_cols() == score.num_cols());
assert(score.num_rows() == ceiling_divide(query_size, word_size));
int32_t i = query_size;
int32_t j = score.num_cols() - 1;
int8_t* path = paths_base + id * static_cast<ptrdiff_t>(max_path_length);
const WordType last_entry_mask = query_size % word_size != 0 ? (WordType(1) << (query_size % word_size)) - 1 : ~WordType(0);
nw_score_t myscore = i > 0 ? score((i - 1) / word_size, j) : 0; // row 0 is implicit, NW matrix is shifted by i -> i-1 (see get_myers_score)
int32_t pos = 0;
while (i > 0 && j > 0)
{
int8_t r = 0;
nw_score_t const above = i == 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask);
nw_score_t const diag = i == 1 ? j - 1 : get_myers_score(i - 1, j - 1, pv, mv, score, last_entry_mask);
nw_score_t const left = get_myers_score(i, j - 1, pv, mv, score, last_entry_mask);
if (left + 1 == myscore)
{
r = static_cast<int8_t>(AlignmentState::insertion);
myscore = left;
--j;
}
else if (above + 1 == myscore)
{
r = static_cast<int8_t>(AlignmentState::deletion);
myscore = above;
--i;
}
else
{
r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch));
myscore = diag;
--i;
--j;
}
path[pos] = r;
++pos;
}
while (i > 0)
{
path[pos] = static_cast<int8_t>(AlignmentState::deletion);
++pos;
--i;
}
while (j > 0)
{
path[pos] = static_cast<int8_t>(AlignmentState::insertion);
++pos;
--j;
}
lengths[id] = pos;
}
__global__ void myers_backtrace_kernel(int8_t* paths_base, int32_t* lengths, int32_t max_path_length,
batched_device_matrices<WordType>::device_interface* pvi,
batched_device_matrices<WordType>::device_interface* mvi,
batched_device_matrices<int32_t>::device_interface* scorei,
int32_t const* sequence_lengths_d,
int32_t n_alignments)
{
const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_alignments)
return;
GW_CONSTEXPR int32_t word_size = sizeof(WordType) * CHAR_BIT;
const int32_t query_size = sequence_lengths_d[2 * idx];
const int32_t target_size = sequence_lengths_d[2 * idx + 1];
const int32_t n_words = (query_size + word_size - 1) / word_size;
const device_matrix_view<WordType> pv = pvi->get_matrix_view(idx, n_words, target_size + 1);
const device_matrix_view<WordType> mv = mvi->get_matrix_view(idx, n_words, target_size + 1);
const device_matrix_view<int32_t> score = scorei->get_matrix_view(idx, n_words, target_size + 1);
myers_backtrace(paths_base, lengths, max_path_length, pv, mv, score, query_size, idx);
}
__global__ void myers_convert_to_full_score_matrix_kernel(batched_device_matrices<int32_t>::device_interface* fullscorei,
batched_device_matrices<WordType>::device_interface* pvi,
batched_device_matrices<WordType>::device_interface* mvi,
batched_device_matrices<int32_t>::device_interface* scorei,
int32_t const* sequence_lengths_d,
int32_t alignment)
{
GW_CONSTEXPR int32_t word_size = sizeof(WordType) * CHAR_BIT;
const int32_t query_size = sequence_lengths_d[2 * alignment];
const int32_t target_size = sequence_lengths_d[2 * alignment + 1];
const int32_t n_words = (query_size + word_size - 1) / word_size;
assert(query_size > 0);
int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
int32_t j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < target_size + 1 && i < query_size + 1)
{
const WordType last_entry_mask = query_size % word_size != 0 ? (WordType(1) << (query_size % word_size)) - 1 : ~WordType(0);
device_matrix_view<WordType> pv = pvi->get_matrix_view(0, n_words, target_size + 1);
device_matrix_view<WordType> mv = mvi->get_matrix_view(0, n_words, target_size + 1);
device_matrix_view<int32_t> score = scorei->get_matrix_view(0, n_words, target_size + 1);
device_matrix_view<int32_t> fullscore = fullscorei->get_matrix_view(0, query_size + 1, target_size + 1);
int32_t myscore = 0;
if (i == 0)
myscore = j;
else
myscore = get_myers_score(i, j, pv, mv, score, last_entry_mask);
fullscore(i, j) = myscore;
}
}
__global__ void myers_compute_score_matrix_kernel(
batched_device_matrices<WordType>::device_interface* pvi,
batched_device_matrices<WordType>::device_interface* mvi,
batched_device_matrices<int32_t>::device_interface* scorei,
batched_device_matrices<WordType>::device_interface* query_patternsi,
char const* sequences_d, int32_t const* sequence_lengths_d,
int32_t max_sequence_length,
int32_t n_alignments)
{
GW_CONSTEXPR int32_t word_size = sizeof(WordType) * CHAR_BIT;
GW_CONSTEXPR int32_t warp_size = 32;
assert(warpSize == warp_size);
assert(threadIdx.x < warp_size);
const int32_t alignment_idx = blockIdx.x;
if (alignment_idx >= n_alignments)
return;
const int32_t query_size = sequence_lengths_d[2 * alignment_idx];
const int32_t target_size = sequence_lengths_d[2 * alignment_idx + 1];
const char* const query = sequences_d + 2 * alignment_idx * max_sequence_length;
const char* const target = sequences_d + (2 * alignment_idx + 1) * max_sequence_length;
const int32_t n_words = (query_size + word_size - 1) / word_size;
const int32_t n_warp_iterations = ceiling_divide(n_words, warp_size) * warp_size;
device_matrix_view<WordType> pv = pvi->get_matrix_view(alignment_idx, n_words, target_size + 1);
device_matrix_view<WordType> mv = mvi->get_matrix_view(alignment_idx, n_words, target_size + 1);
device_matrix_view<int32_t> score = scorei->get_matrix_view(alignment_idx, n_words, target_size + 1);
device_matrix_view<WordType> query_patterns = query_patternsi->get_matrix_view(alignment_idx, n_words, 4);
for (int32_t idx = threadIdx.x; idx < n_words; idx += warp_size)
{
pv(idx, 0) = ~WordType(0);
mv(idx, 0) = 0;
score(idx, 0) = min((idx + 1) * word_size, query_size);
// TODO query load is inefficient
query_patterns(idx, 0) = myers_generate_query_pattern('A', query, query_size, idx * word_size);
query_patterns(idx, 1) = myers_generate_query_pattern('C', query, query_size, idx * word_size);
query_patterns(idx, 2) = myers_generate_query_pattern('T', query, query_size, idx * word_size);
query_patterns(idx, 3) = myers_generate_query_pattern('G', query, query_size, idx * word_size);
}
__syncwarp();
for (int32_t t = 1; t <= target_size; ++t)
{
int32_t warp_carry = 0;
if (threadIdx.x == 0)
warp_carry = 1; // for global alignment the (implicit) first row has to be 0,1,2,3,... -> carry 1
for (int32_t idx = threadIdx.x; idx < n_warp_iterations; idx += warp_size)
{
if (idx < n_words)
{
const uint32_t warp_mask = idx / warp_size < n_words / warp_size ? 0xffff'ffffu : (1u << (n_words % warp_size)) - 1;
WordType pv_local = pv(idx, t - 1);
WordType mv_local = mv(idx, t - 1);
const WordType highest_bit = WordType(1) << (idx == (n_words - 1) ? query_size - (n_words - 1) * word_size - 1 : word_size - 1);
const WordType eq = get_query_pattern(query_patterns, idx, 0, target[t - 1], false);
warp_carry = myers_advance_block(warp_mask, highest_bit, eq, pv_local, mv_local, warp_carry);
score(idx, t) = score(idx, t - 1) + warp_carry;
if (threadIdx.x == 0)
warp_carry = 0;
if (warp_mask == 0xffff'ffffu && (threadIdx.x == 31 || threadIdx.x == 0))
warp_carry = __shfl_down_sync(0x8000'0001u, warp_carry, warp_size - 1);
if (threadIdx.x != 0)
warp_carry = 0;
pv(idx, t) = pv_local;
mv(idx, t) = mv_local;
}
__syncwarp();
}
}
}
__device__ int32_t myers_backtrace_banded(int8_t* path, int32_t* const path_count, device_matrix_view<WordType> const& pv, device_matrix_view<WordType> const& mv, device_matrix_view<int32_t> const& score, int32_t diagonal_begin, int32_t diagonal_end, int32_t band_width, int32_t target_size, int32_t query_size)
{
assert(threadIdx.x == 0);
using nw_score_t = int32_t;
GW_CONSTEXPR nw_score_t out_of_band = numeric_limits<nw_score_t>::max() - 1; // -1 to avoid integer overflow further down.
assert(pv.num_rows() == score.num_rows());
assert(mv.num_rows() == score.num_rows());
assert(pv.num_cols() == score.num_cols());
assert(mv.num_cols() == score.num_cols());
assert(score.num_rows() == ceiling_divide(band_width, word_size));
assert(diagonal_begin >= 0);
assert(diagonal_end >= diagonal_begin);
assert(diagonal_end >= 2); // this should only break if target_size == 0 - which is not valid input.
assert(band_width > 0 || query_size == 0);
int32_t i = band_width;
int32_t j = target_size;
const WordType last_entry_mask = band_width % word_size != 0 ? (WordType(1) << (band_width % word_size)) - 1 : ~WordType(0);
const nw_score_t last_diagonal_score = diagonal_end < 2 ? out_of_band : get_myers_score(1, diagonal_end - 2, pv, mv, score, last_entry_mask) + 2;
nw_score_t myscore = i > 0 ? score((i - 1) / word_size, j) : 0; // row 0 is implicit, NW matrix is shifted by i -> i-1, i.e. i \in [1,band_width] for get_myers_score. (see get_myers_score)
int32_t pos = 0;
int8_t prev_r = -1;
int32_t r_count = 0;
while (j >= diagonal_end)
{
int8_t r = 0;
// Worst case for the implicit top row (i == 0) of the bottom right block of the NW is the last diagonal entry on the same row + (j - diagonal_end) * indel cost.
nw_score_t const above = i <= 1 ? (last_diagonal_score + j - diagonal_end) : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask);
nw_score_t const diag = i <= 1 ? (last_diagonal_score + j - 1 - diagonal_end) : get_myers_score(i - 1, j - 1, pv, mv, score, last_entry_mask);
nw_score_t const left = i < 1 ? (last_diagonal_score + j - 1 - diagonal_end) : get_myers_score(i, j - 1, pv, mv, score, last_entry_mask);
if (left + 1 == myscore)
{
r = static_cast<int8_t>(AlignmentState::insertion);
myscore = left;
--j;
}
else if (above + 1 == myscore)
{
r = static_cast<int8_t>(AlignmentState::deletion);
myscore = above;
--i;
}
else
{
assert(diag == myscore || diag + 1 == myscore);
r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch));
myscore = diag;
--i;
--j;
}
if (prev_r != r)
{
if (prev_r != -1)
{
path[pos] = prev_r;
path_count[pos] = r_count;
++pos;
}
prev_r = r;
r_count = 0;
}
++r_count;
}
while (j >= diagonal_begin)
{
int8_t r = 0;
nw_score_t const above = i <= 1 ? out_of_band : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask);
nw_score_t const diag = i <= 0 ? j - 1 : get_myers_score(i, j - 1, pv, mv, score, last_entry_mask);
nw_score_t const left = i >= band_width ? out_of_band : get_myers_score(i + 1, j - 1, pv, mv, score, last_entry_mask);
// out-of-band cases: diag is always preferable, since worst-case-(above|left) - myscore >= diag - myscore always holds.
if (left + 1 == myscore)
{
r = static_cast<int8_t>(AlignmentState::insertion);
myscore = left;
++i;
--j;
}
else if (above + 1 == myscore)
{
r = static_cast<int8_t>(AlignmentState::deletion);
myscore = above;
--i;
}
else
{
assert(diag == myscore || diag + 1 == myscore);
r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch));
myscore = diag;
--j;
}
if (prev_r != r)
{
if (prev_r != -1)
{
path[pos] = prev_r;
path_count[pos] = r_count;
++pos;
}
prev_r = r;
r_count = 0;
}
++r_count;
}
while (i > 0 && j > 0)
{
int8_t r = 0;
nw_score_t const above = i == 1 ? j : get_myers_score(i - 1, j, pv, mv, score, last_entry_mask);
nw_score_t const diag = i == 1 ? j - 1 : get_myers_score(i - 1, j - 1, pv, mv, score, last_entry_mask);
nw_score_t const left = i > band_width ? out_of_band : get_myers_score(i, j - 1, pv, mv, score, last_entry_mask);
// out-of-band cases: diag is always preferable, since worst-case-(above|left) - myscore >= diag - myscore always holds.
if (left + 1 == myscore)
{
r = static_cast<int8_t>(AlignmentState::insertion);
myscore = left;
--j;
}
else if (above + 1 == myscore)
{
r = static_cast<int8_t>(AlignmentState::deletion);
myscore = above;
--i;
}
else
{
assert(diag == myscore || diag + 1 == myscore);
r = (diag == myscore ? static_cast<int8_t>(AlignmentState::match) : static_cast<int8_t>(AlignmentState::mismatch));
myscore = diag;
--i;
--j;
}
if (prev_r != r)
{
if (prev_r != -1)
{
path[pos] = prev_r;
path_count[pos] = r_count;
++pos;
}
prev_r = r;
r_count = 0;
}
++r_count;
}
if (i > 0)
{
if (prev_r != static_cast<int8_t>(AlignmentState::deletion))
{
if (prev_r != -1)
{
path[pos] = prev_r;
path_count[pos] = r_count;
++pos;
}
prev_r = static_cast<int8_t>(AlignmentState::deletion);
r_count = 0;
}
r_count += i;
}
if (j > 0)
{
if (prev_r != static_cast<int8_t>(AlignmentState::insertion))
{
if (prev_r != -1)
{
path[pos] = prev_r;
path_count[pos] = r_count;
++pos;
}
prev_r = static_cast<int8_t>(AlignmentState::insertion);
r_count = 0;
}
r_count += j;
}
if (r_count != 0)
{
assert(prev_r != -1);
path[pos] = prev_r;
path_count[pos] = r_count;
++pos;
}
return pos;
}
__device__ void myers_compute_scores_horizontal_band_impl(
device_matrix_view<WordType>& pv,
device_matrix_view<WordType>& mv,
device_matrix_view<int32_t>& score,
device_matrix_view<WordType>& query_patterns,
char const* target_begin,
char const* query_begin,
const int32_t t_begin,
const int32_t t_end,
const int32_t width,
const int32_t n_words,
const int32_t pattern_idx_offset)
{
assert(n_words == ceiling_divide(width, word_size));
assert(t_begin <= t_end);
const int32_t n_warp_iterations = ceiling_divide(n_words, warp_size) * warp_size;
for (int32_t t = t_begin; t < t_end; ++t)
{
int32_t warp_carry = 0;
if (threadIdx.x == 0)
warp_carry = 1; // worst case for the top border of the band
for (int32_t idx = threadIdx.x; idx < n_warp_iterations; idx += warp_size)
{
if (idx < n_words)
{
const uint32_t warp_mask = idx / warp_size < n_words / warp_size ? 0xffff'ffffu : (1u << (n_words % warp_size)) - 1;
WordType pv_local = pv(idx, t - 1);
WordType mv_local = mv(idx, t - 1);
const WordType highest_bit = WordType(1) << (idx == (n_words - 1) ? width - (n_words - 1) * word_size - 1 : word_size - 1);
const WordType eq = get_query_pattern(query_patterns, idx, pattern_idx_offset, target_begin[t - 1], false);
warp_carry = myers_advance_block(warp_mask, highest_bit, eq, pv_local, mv_local, warp_carry);
score(idx, t) = score(idx, t - 1) + warp_carry;
if (threadIdx.x == 0)
warp_carry = 0;
if (warp_mask == 0xffff'ffffu && (threadIdx.x == 0 || threadIdx.x == 31))
warp_carry = __shfl_down_sync(0x8000'0001u, warp_carry, warp_size - 1);
if (threadIdx.x != 0)
warp_carry = 0;
pv(idx, t) = pv_local;
mv(idx, t) = mv_local;
}
__syncwarp();
}
}
}
__device__ void myers_compute_scores_diagonal_band_impl(
device_matrix_view<WordType>& pv,
device_matrix_view<WordType>& mv,
device_matrix_view<int32_t>& score,
device_matrix_view<WordType>& query_patterns,
char const* target_begin,
char const* query_begin,
const int32_t t_begin,
const int32_t t_end,
const int32_t band_width,
const int32_t n_words_band,
const int32_t pattern_idx_offset)
{
assert(n_words_band == ceiling_divide(band_width, word_size));
assert(band_width - (n_words_band - 1) * word_size >= 2); // we need at least two bits in the last word
const int32_t n_warp_iterations = ceiling_divide(n_words_band, warp_size) * warp_size;
for (int32_t t = t_begin; t < t_end; ++t)
{
int32_t carry = 0;
if (threadIdx.x == 0)
carry = 1; // worst case for the top border of the band
for (int32_t idx = threadIdx.x; idx < n_warp_iterations; idx += warp_size)
{
// idx within band column
const uint32_t warp_mask = idx / warp_size < n_words_band / warp_size ? 0xffff'ffffu : (1u << (n_words_band % warp_size)) - 1;
if (idx < n_words_band)
{
// data from the previous column
WordType pv_local = warp_rightshift_sync(warp_mask, pv(idx, t - 1));
WordType mv_local = warp_rightshift_sync(warp_mask, mv(idx, t - 1));
if (threadIdx.x == 31 && warp_mask == 0xffff'ffffu)
{
if (idx < n_words_band - 1)
{
pv_local |= pv(idx + 1, t - 1) << (word_size - 1);
mv_local |= mv(idx + 1, t - 1) << (word_size - 1);
}
}
const WordType eq = get_query_pattern(query_patterns, idx, pattern_idx_offset + t - t_begin + 1, target_begin[t - 1], false);
const WordType delta_right_bit = WordType(1) << (idx == (n_words_band - 1) ? band_width - (n_words_band - 1) * word_size - 2 : word_size - 2);
const WordType delta_down_bit = delta_right_bit << 1;
assert(delta_down_bit != 0);
if (idx == n_words_band - 1)
{
// bits that have no left neighbor -> assume worst case: +1
pv_local |= delta_down_bit;
mv_local &= ~delta_down_bit;
}
const int2 delta_right = myers_advance_block2(warp_mask, delta_right_bit, eq, pv_local, mv_local, carry);
const int32_t delta_down = ((pv_local & delta_down_bit) == WordType(0) ? 0 : 1) - ((mv_local & delta_down_bit) == WordType(0) ? 0 : 1);
// Since idx is relative to diagonal band, (idx, t-1) -> (idx,t)
// corresponds to (n-1,t-1) -> (n,t) in the NW matrix.
// To get from score'(n-1, t-1) -> score'(n, t-1)
// add horizontal delta in row n-1 (delta_right.x)
// and the vertical delta in column t (delta_down).
score(idx, t) = score(idx, t - 1) + delta_right.x + delta_down;
// Carry horizontal delta in row n (= delta_right.y) to next warp iteration
if (threadIdx.x == 0)
carry = 0;
if (warp_mask == 0xffff'ffffu && (threadIdx.x == 0 || threadIdx.x == 31))
carry = __shfl_down_sync(0x8000'0001u, delta_right.y, warp_size - 1);
if (threadIdx.x != 0)
carry = 0;
pv(idx, t) = pv_local;
mv(idx, t) = mv_local;
}
__syncwarp();
}
}
}
__device__ void
myers_compute_scores_edit_dist_banded(
int32_t& diagonal_begin,
int32_t& diagonal_end,
device_matrix_view<WordType>& pv,
device_matrix_view<WordType>& mv,
device_matrix_view<int32_t>& score,
device_matrix_view<WordType>& query_patterns,
char const* target_begin,
char const* query_begin,
int32_t const target_size,
int32_t const query_size,
int32_t const band_width,
int32_t const n_words_band,
int32_t const p)
{
// Note: 0-th row of the NW matrix is implicit for pv, mv and score! (given by the initial warp_carry)
assert(warpSize == warp_size);
assert(threadIdx.x < warp_size);
assert(target_size > 0);
assert(query_size >= 0);
assert(band_width > 0 || query_size == 0); // might even be ok for band_width = 0 - haven't checked.
assert(n_words_band > 0 || query_size == 0);
assert(p >= 0);
assert(pv.num_rows() == n_words_band);
assert(mv.num_rows() == n_words_band);
assert(score.num_rows() == n_words_band);
assert(pv.num_cols() == target_size + 1);
assert(mv.num_cols() == target_size + 1);
assert(score.num_cols() == target_size + 1);
for (int32_t idx = threadIdx.x; idx < n_words_band; idx += warp_size)
{
pv(idx, 0) = ~WordType(0);
mv(idx, 0) = 0;
score(idx, 0) = min((idx + 1) * word_size, band_width);
}
__syncwarp();
// This function computes a diagonal band of the NW matrix (Ukkonen algorithm).
// In essence it computes the diagonals [-p, ..., 0, ..., p + target_size - query_size] (for query_size < target_size),
// where diagonal -p starts at m(p,0), and p + target_size - query_size starts at m(0,p+target_size-query_size)
// using Myers bit-vector algorithm with a word size of warp_size * sizeof(WordType).
//
// band_width is the width of this band = 1 + 2*p + abs(target_size - query_size).
//
// Note that for query_size >= target_size the diagonals [-p - (query_size - target_size), ..., 0, ..., p] are used.
// This implementation computes the matrix band column by column.
// To ease implementation band_width elements per column are computed for every column,
// even though they are not needed for the first few and last few columns.
//
// In more detail: instead of just computing the diagonals:
//
// \\\\\00000|
// \\\\\\0000| target_size=9, query_size=7, p=1
// 0\\\\\\000|
// 00\\\\\\00| ("|" has no meaning - just to avoid multi-line comments with trailing"\")
// 000\\\\\\0|
// 0000\\\\\\|
// 00000\\\\\|
//
// we compute horizontal stripes with n=band_width rows at the beginning and at the end.
// Only the range [diagonal_begin,diagonal_end)
//
// ----\00000|
// ----\\0000|
// ----\\----|
// ----\\----|
// ----\\----|
// 0000\\----|
// 00000\----|
if (band_width >= query_size)
{
// If the band_width is larger than the query_size just do a full Myers
// i.e. do only one large horizontal stripe of width query_size.
diagonal_begin = target_size + 1;
diagonal_end = target_size + 1;
myers_compute_scores_horizontal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, 1, target_size + 1, query_size, n_words_band, 0);
}
else
{
const int32_t symmetric_band = (band_width - min(1 + 2 * p + abs(target_size - query_size), query_size) == 0) ? 1 : 0;
diagonal_begin = query_size < target_size ? target_size - query_size + p + 2 : p + 2 + (1 - symmetric_band);
diagonal_end = query_size < target_size ? query_size - p + symmetric_band : query_size - (query_size - target_size) - p + 1;
myers_compute_scores_horizontal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, 1, diagonal_begin, band_width, n_words_band, 0);
myers_compute_scores_diagonal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, diagonal_begin, diagonal_end, band_width, n_words_band, 0);
myers_compute_scores_horizontal_band_impl(pv, mv, score, query_patterns, target_begin, query_begin, diagonal_end, target_size + 1, band_width, n_words_band, query_size - band_width);
}
}
__device__ int32_t get_alignment_task(const int32_t* scheduling_index_d, cuda::atomic<int32_t, cuda::thread_scope_device>* scheduling_atomic_d, const int32_t n_alignments)
{
// Fetch the index of the next alignment to be processed.
// A full warp operates on the same alignment, i.e.
// the whole warp gets the same alignment index.
int32_t sched_idx = 0;
if (threadIdx.x == 0)
{
sched_idx = scheduling_atomic_d->fetch_add(1, cuda::memory_order_relaxed);
}
sched_idx = __shfl_sync(0xffff'ffffu, sched_idx, 0);
return sched_idx < n_alignments ? scheduling_index_d[sched_idx] : n_alignments;
}
__global__ void myers_banded_kernel(
int8_t* const paths_output,
int32_t* const path_counts_output,
int32_t* const path_starts,
uint32_t* const path_metadata,
cuda::atomic<int32_t, cuda::thread_scope_device>* path_start_atomic_d,
batched_device_matrices<WordType>::device_interface* pvi,
batched_device_matrices<WordType>::device_interface* mvi,
batched_device_matrices<int32_t>::device_interface* scorei,
batched_device_matrices<WordType>::device_interface* query_patternsi,
int8_t* path_buffer,
int32_t* path_counts_buffer,
char const* sequences_d, int64_t const* sequence_starts_d, int32_t const* max_bandwidths_d,
const int32_t* scheduling_index_d, cuda::atomic<int32_t, cuda::thread_scope_device>* scheduling_atomic_d,
const int32_t path_buffer_size,
const int32_t n_large_workspaces,
const int32_t n_alignments)
{
assert(warpSize == warp_size);
assert(threadIdx.x < warp_size);
if (blockIdx.x >= n_alignments)
return;
int32_t alignment_idx = 0;
if (blockIdx.x < n_large_workspaces)
{
alignment_idx = scheduling_index_d[blockIdx.x];
}
else
{
alignment_idx = get_alignment_task(scheduling_index_d, scheduling_atomic_d, n_alignments);
}
while (alignment_idx < n_alignments)
{
const char* const query = sequences_d + sequence_starts_d[2 * alignment_idx];
const char* const target = sequences_d + sequence_starts_d[2 * alignment_idx + 1];
const int32_t query_size = target - query;
const int32_t target_size = sequences_d + sequence_starts_d[2 * alignment_idx + 2] - target;
const int32_t n_words = ceiling_divide(query_size, word_size);
const int32_t max_bandwidth = max_bandwidths_d[alignment_idx];
assert(max_bandwidth % word_size != 1); // we need at least two bits in the last word
if (max_bandwidth - 1 < abs(target_size - query_size) && query_size != 0 && target_size != 0)
{
if (threadIdx.x == 0)
{
path_starts[alignment_idx] = -1;
path_metadata[alignment_idx] = static_cast<uint32_t>(alignment_idx) | 0;
}
alignment_idx = get_alignment_task(scheduling_index_d, scheduling_atomic_d, n_alignments);
continue;
}
if (target_size == 0 || query_size == 0)
{
// Temporary fix for edge cases target_size == 0 and query_size == 0.
// TODO: check if the regular implementation works for this case.
if (threadIdx.x == 0)
{
if (query_size == 0 && target_size == 0)
{
path_starts[alignment_idx] = -1;
path_metadata[alignment_idx] = static_cast<uint32_t>(alignment_idx) | (1u << 31);
}
else
{
const int32_t path_start = path_start_atomic_d->fetch_add(1, cuda::memory_order_relaxed);
path_starts[alignment_idx] = path_start;
paths_output[path_start] = query_size == 0 ? static_cast<int8_t>(AlignmentState::insertion) : static_cast<int8_t>(AlignmentState::deletion);
path_counts_output[path_start] = query_size + target_size; // one of them is 0.
path_metadata[alignment_idx] = static_cast<uint32_t>(alignment_idx) | (1u << 31);
}
}
alignment_idx = get_alignment_task(scheduling_index_d, scheduling_atomic_d, n_alignments);
continue;
}
__syncwarp();
device_matrix_view<WordType> query_pattern = query_patternsi->get_matrix_view(blockIdx.x, n_words, 4);
for (int32_t idx = threadIdx.x; idx < n_words; idx += warp_size)
{
// TODO query load is inefficient
query_pattern(idx, 0) = myers_generate_query_pattern('A', query, query_size, idx * word_size);
query_pattern(idx, 1) = myers_generate_query_pattern('C', query, query_size, idx * word_size);
query_pattern(idx, 2) = myers_generate_query_pattern('T', query, query_size, idx * word_size);
query_pattern(idx, 3) = myers_generate_query_pattern('G', query, query_size, idx * word_size);
}
__syncwarp();
// Use the Ukkonen algorithm for banding.
// Take an initial guess for the edit distance: max_distance_estimate
// and compute the maximal band of the NW matrix which is required for this distance.
// If the computed distance is smaller accept and compute the backtrace/path,
// otherwise retry with a larger guess (i.e. a larger band).
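// Illustrative numbers: for query_size = 100, target_size = 120 and
// initial_distance_guess_factor = 20, the first guess is
// max_distance_estimate = 20 + 100/20 = 25, giving p = (25 - 20)/2 = 2 and
// band_width = min(1 + 2*2 + 20, 100) = 25. If the computed edit distance
// exceeds 25 (and neither the full query width nor max_bandwidth was reached),
// the estimate is doubled and the band is recomputed.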
int32_t max_distance_estimate = max(1, abs(target_size - query_size) + min(target_size, query_size) / initial_distance_guess_factor);
device_matrix_view<WordType> pv;
device_matrix_view<WordType> mv;
device_matrix_view<int32_t> score;
int32_t diagonal_begin = -1;
int32_t diagonal_end = -1;
int32_t band_width = 0;
while (1)
{
int32_t p = min3(target_size, query_size, (max_distance_estimate - abs(target_size - query_size)) / 2);
int32_t band_width_new = min(1 + 2 * p + abs(target_size - query_size), query_size);
if (band_width_new % word_size == 1 && band_width_new != query_size) // we need at least two bits in the last word
{
p += 1;
band_width_new = min(1 + 2 * p + abs(target_size - query_size), query_size);
}
if (band_width_new > max_bandwidth)
{
band_width_new = max_bandwidth;
p = (band_width_new - 1 - abs(target_size - query_size)) / 2;
}
const int32_t n_words_band = ceiling_divide(band_width_new, word_size);
if (static_cast<int64_t>(n_words_band) * static_cast<int64_t>(target_size + 1) > pvi->get_max_elements_per_matrix(blockIdx.x))
{
band_width = -band_width;
break;
}
band_width = band_width_new;
pv = pvi->get_matrix_view(blockIdx.x, n_words_band, target_size + 1);
mv = mvi->get_matrix_view(blockIdx.x, n_words_band, target_size + 1);
score = scorei->get_matrix_view(blockIdx.x, n_words_band, target_size + 1);
diagonal_begin = -1;
diagonal_end = -1;
myers_compute_scores_edit_dist_banded(diagonal_begin, diagonal_end, pv, mv, score, query_pattern, target, query, target_size, query_size, band_width, n_words_band, p);
__syncwarp();
assert(n_words_band > 0 || query_size == 0);
const int32_t cur_edit_distance = n_words_band > 0 ? score(n_words_band - 1, target_size) : target_size;
if (cur_edit_distance <= max_distance_estimate || band_width == query_size)
{
break;
}
if (band_width == max_bandwidth)
{
band_width = -band_width;
break;
}
max_distance_estimate *= 2;
}
int8_t* const path = path_buffer + blockIdx.x * path_buffer_size;
int32_t* const path_counts = path_counts_buffer + blockIdx.x * path_buffer_size;
int32_t path_start = 0;
int32_t path_length = 0;
if (threadIdx.x == 0)
{
if (band_width != 0)
{
path_length = myers_backtrace_banded(path, path_counts, pv, mv, score, diagonal_begin, diagonal_end, abs(band_width), target_size, query_size);
path_start = path_start_atomic_d->fetch_add(path_length, cuda::memory_order_relaxed);
path_starts[alignment_idx] = path_start;
path_metadata[alignment_idx] = static_cast<uint32_t>(alignment_idx) | (band_width > 0 ? (1u << 31) : 0);
}
else
{
path_starts[alignment_idx] = -1;
path_metadata[alignment_idx] = static_cast<uint32_t>(alignment_idx) | 0;
}
}
path_start = __shfl_sync(0xffff'ffffu, path_start, 0);
path_length = __shfl_sync(0xffff'ffffu, path_length, 0);
for (int32_t i = threadIdx.x; i < path_length; i += warp_size)
paths_output[path_start + i] = path[i];
__syncwarp();
for (int32_t i = threadIdx.x; i < path_length; i += warp_size)
path_counts_output[path_start + i] = path_counts[i];
alignment_idx = get_alignment_task(scheduling_index_d, scheduling_atomic_d, n_alignments);
__syncwarp();
}
}
} // namespace myers
int32_t myers_compute_edit_distance(std::string const& target, std::string const& query)
{
if (get_size(query) == 0)
return get_size(target);
const int32_t n_words = (get_size(query) + word_size - 1) / word_size;
CudaStream stream = make_cuda_stream();
DefaultDeviceAllocator allocator = create_default_device_allocator();
int32_t max_sequence_length = std::max(get_size(target), get_size(query));
device_buffer<char> sequences_d(2 * max_sequence_length, allocator, stream.get());
device_buffer<int32_t> sequence_lengths_d(2, allocator, stream.get());
batched_device_matrices<myers::WordType> pv(1, n_words * (get_size(target) + 1), allocator, stream.get());
batched_device_matrices<myers::WordType> mv(1, n_words * (get_size(target) + 1), allocator, stream.get());
batched_device_matrices<int32_t> score(1, n_words * (get_size(target) + 1), allocator, stream.get());
batched_device_matrices<myers::WordType> query_patterns(1, n_words * 4, allocator, stream.get());
std::array<int32_t, 2> lengths = {static_cast<int32_t>(get_size(query)), static_cast<int32_t>(get_size(target))};
cudautils::device_copy_n_async(query.data(), get_size(query), sequences_d.data(), stream.get());
cudautils::device_copy_n_async(target.data(), get_size(target), sequences_d.data() + max_sequence_length, stream.get());
cudautils::device_copy_n_async(lengths.data(), 2, sequence_lengths_d.data(), stream.get());
myers::myers_compute_score_matrix_kernel<<<1, warp_size, 0, stream.get()>>>(pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d.data(), sequence_lengths_d.data(), max_sequence_length, 1);
GW_CU_CHECK_ERR(cudaPeekAtLastError());
matrix<int32_t> score_host = score.get_matrix(0, n_words, get_size(target) + 1, stream.get());
return score_host(n_words - 1, get_size(target));
}
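// Usage sketch (hypothetical call site):
//   const int32_t dist = myers_compute_edit_distance("ACGT", "ACT");
//   // expected edit distance: 1 (a single insertion/deletion)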
matrix<int32_t> myers_get_full_score_matrix(std::string const& target, std::string const& query)
{
if (get_size(target) == 0)
{
matrix<int32_t> r(get_size(query) + 1, 1);
std::iota(r.data(), r.data() + get_size(query) + 1, 0);
return r;
}
if (get_size(query) == 0)
{
matrix<int32_t> r(1, get_size(target) + 1);
std::iota(r.data(), r.data() + get_size(target) + 1, 0);
return r;
}
CudaStream stream = make_cuda_stream();
DefaultDeviceAllocator allocator = create_default_device_allocator();
int32_t max_sequence_length = std::max(get_size(target), get_size(query));
device_buffer<char> sequences_d(2 * max_sequence_length, allocator, stream.get());
device_buffer<int32_t> sequence_lengths_d(2, allocator, stream.get());
const int32_t n_words = (get_size(query) + word_size - 1) / word_size;
batched_device_matrices<myers::WordType> pv(1, n_words * (get_size(target) + 1), allocator, stream.get());
batched_device_matrices<myers::WordType> mv(1, n_words * (get_size(target) + 1), allocator, stream.get());
batched_device_matrices<int32_t> score(1, n_words * (get_size(target) + 1), allocator, stream.get());
batched_device_matrices<myers::WordType> query_patterns(1, n_words * 4, allocator, stream.get());
batched_device_matrices<int32_t> fullscore(1, (get_size(query) + 1) * (get_size(target) + 1), allocator, stream.get());
std::array<int32_t, 2> lengths = {static_cast<int32_t>(get_size(query)), static_cast<int32_t>(get_size(target))};
cudautils::device_copy_n_async(query.data(), get_size(query), sequences_d.data(), stream.get());
cudautils::device_copy_n_async(target.data(), get_size(target), sequences_d.data() + max_sequence_length, stream.get());
cudautils::device_copy_n_async(lengths.data(), 2, sequence_lengths_d.data(), stream.get());
myers::myers_compute_score_matrix_kernel<<<1, warp_size, 0, stream.get()>>>(pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d.data(), sequence_lengths_d.data(), max_sequence_length, 1);
GW_CU_CHECK_ERR(cudaPeekAtLastError());
{
dim3 n_threads = {32, 4, 1};
dim3 n_blocks = {1, 1, 1};
n_blocks.x = ceiling_divide<int32_t>(get_size<int32_t>(query) + 1, n_threads.x);
n_blocks.y = ceiling_divide<int32_t>(get_size<int32_t>(target) + 1, n_threads.y);
myers::myers_convert_to_full_score_matrix_kernel<<<n_blocks, n_threads, 0, stream.get()>>>(fullscore.get_device_interface(), pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), sequence_lengths_d.data(), 0);
GW_CU_CHECK_ERR(cudaPeekAtLastError());
}
matrix<int32_t> fullscore_host = fullscore.get_matrix(0, get_size(query) + 1, get_size(target) + 1, stream.get());
return fullscore_host;
}
void myers_gpu(int8_t* paths_d, int32_t* path_lengths_d, int32_t max_path_length,
char const* sequences_d,
int32_t const* sequence_lengths_d,
int32_t max_sequence_length,
int32_t n_alignments,
batched_device_matrices<myers::WordType>& pv,
batched_device_matrices<myers::WordType>& mv,
batched_device_matrices<int32_t>& score,
batched_device_matrices<myers::WordType>& query_patterns,
cudaStream_t stream)
{
{
const dim3 threads(warp_size, 1, 1);
const dim3 blocks(n_alignments, 1, 1);
myers::myers_compute_score_matrix_kernel<<<blocks, threads, 0, stream>>>(pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(), sequences_d, sequence_lengths_d, max_sequence_length, n_alignments);
}
{
const dim3 threads(128, 1, 1);
const dim3 blocks(ceiling_divide<int32_t>(n_alignments, threads.x), 1, 1);
myers::myers_backtrace_kernel<<<blocks, threads, 0, stream>>>(paths_d, path_lengths_d, max_path_length, pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), sequence_lengths_d, n_alignments);
}
GW_CU_CHECK_ERR(cudaPeekAtLastError());
}
int32_t myers_banded_gpu_get_blocks_per_sm()
{
constexpr int block_size = warp_size;
int n_blocks = 0;
GW_CU_CHECK_ERR(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&n_blocks, myers::myers_banded_kernel, block_size, 0));
return n_blocks;
}
void myers_banded_gpu(int8_t* paths_d, int32_t* path_counts_d, int32_t* path_starts_d, uint32_t* path_metadata_d,
char const* sequences_d,
int64_t const* sequence_starts_d,
int32_t const* max_bandwidths_d,
int32_t* scheduling_index_d,
int32_t* scheduling_atomic_int_d,
batched_device_matrices<myers::WordType>& pv,
batched_device_matrices<myers::WordType>& mv,
batched_device_matrices<int32_t>& score,
batched_device_matrices<myers::WordType>& query_patterns,
int8_t* path_buffer_d,
int32_t* path_counts_buffer_d,
int32_t path_buffer_size,
int32_t n_alignments,
int32_t n_launch_blocks,
int32_t n_large_workspaces,
cudaStream_t stream)
{
const dim3 threads(warp_size, 1, 1);
const dim3 blocks(n_launch_blocks, 1, 1);
// Work-around for missing cuda::atomic_ref in libcu++.
static_assert(sizeof(int32_t) == sizeof(cuda::atomic<int32_t, cuda::thread_scope_device>), "cuda::atomic<int32_t> needs to have the same size as int32_t.");
static_assert(alignof(int32_t) == alignof(cuda::atomic<int32_t, cuda::thread_scope_device>), "cuda::atomic<int32_t> needs to have the same alignment as int32_t.");
cuda::atomic<int32_t, cuda::thread_scope_device>* const scheduling_atomic_d = reinterpret_cast<cuda::atomic<int32_t, cuda::thread_scope_device>*>(scheduling_atomic_int_d);
cuda::atomic<int32_t, cuda::thread_scope_device>* const path_start_atomic_d = reinterpret_cast<cuda::atomic<int32_t, cuda::thread_scope_device>*>(path_starts_d + n_alignments);
myers::init_atomics<<<1, 1, 0, stream>>>(path_start_atomic_d, scheduling_atomic_d, n_large_workspaces);
GW_CU_CHECK_ERR(cudaPeekAtLastError());
myers::myers_banded_kernel<<<blocks, threads, 0, stream>>>(paths_d, path_counts_d, path_starts_d, path_metadata_d, path_start_atomic_d,
pv.get_device_interface(), mv.get_device_interface(), score.get_device_interface(), query_patterns.get_device_interface(),
path_buffer_d, path_counts_buffer_d,
sequences_d, sequence_starts_d, max_bandwidths_d, scheduling_index_d, scheduling_atomic_d, path_buffer_size, n_large_workspaces, n_alignments);
GW_CU_CHECK_ERR(cudaPeekAtLastError());
cub::DoubleBuffer<int32_t> keys(path_starts_d, path_counts_buffer_d);
cub::DoubleBuffer<uint32_t> values(path_metadata_d, reinterpret_cast<uint32_t*>(scheduling_index_d));
size_t temp_storage_bytes = 0;
char* buffer = reinterpret_cast<char*>(pv.buffer().data());
size_t buffer_size = sizeof(myers::WordType) * pv.buffer().size();
GW_CU_CHECK_ERR(cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes, keys, values, n_alignments, 0, 32, stream));
if (temp_storage_bytes > buffer_size)
{
throw std::runtime_error("Temporary buffer is too small. Please report this bug to the GenomeWorks developers.");
}
GW_CU_CHECK_ERR(cub::DeviceRadixSort::SortPairs(buffer, temp_storage_bytes, keys, values, n_alignments, 0, 32, stream));
if (keys.Current() != path_starts_d)
{
cudautils::device_copy_n_async(path_counts_buffer_d, n_alignments, path_starts_d, stream); // copy only n_alignments (not n_alignments + 1) elements: path_starts_d[n_alignments] holds the path-start atomic counter and must not be sorted
}
if (values.Current() != path_metadata_d)
{
cudautils::device_copy_n_async(reinterpret_cast<uint32_t*>(scheduling_index_d), n_alignments, path_metadata_d, stream);
}
myers::set_minus_one_to_zero<<<(n_alignments + 255) / 256, 256, 0, stream>>>(path_starts_d, n_alignments);
GW_CU_CHECK_ERR(cudaPeekAtLastError());
}
} // namespace cudaaligner
} // namespace genomeworks
} // namespace claraparabricks
// Copyright (c) 2018 Changan Wang
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#if GOOGLE_CUDA == 1
#define EIGEN_USE_GPU
#include "rotated_ps_roi_align_op.h"
#include "tensorflow/core/util/cuda_kernel_helper.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor_shape.h"
using namespace tensorflow;
#include <cstdint>
#include <cmath>
#include <cfloat>
// Define the CUDA kernel.
template <typename T>
__global__ void RotatedPSROIAlignGradCudaKernel(CudaLaunchConfig config, const T * inputs, const T * rois, const int32_t * orders, const T * pooled_features_grad, const int32_t * pooled_index, T * grad_output, const int32_t grid_dim_width, const int32_t grid_dim_height, const int batch_size, const int num_channals, const int map_height, const int map_width, const int num_rois, const bool using_max_pool) {
const int32_t grid_size = grid_dim_width * grid_dim_height;
const int32_t bank_size = num_channals / grid_size;
CUDA_1D_KERNEL_LOOP(worker_index, config.virtual_thread_count) {
// worker_index decomposes into (image_index, roi_index, position_index, channal_pos_remainder):
// position_index selects the pooling-grid cell of the pooled feature,
// channal_pos_remainder selects the channel inside that cell's bank of the feature map.
const int32_t position_index = (worker_index % num_channals) / bank_size;
const int32_t row_index = position_index / grid_dim_width;
const int32_t col_index = position_index % grid_dim_width;
const int32_t channal_pos_remainder = worker_index % bank_size;
const int32_t pool_index = worker_index / num_channals;
const int32_t image_index = pool_index / num_rois;
const int32_t roi_index = pool_index % num_rois;
const T * roi_to_pool = rois + (image_index * num_rois + roi_index) * 8;
const int32_t * roi_order = orders + image_index * num_rois + roi_index;
T * grad_output_start = reinterpret_cast<T*>(grad_output + (image_index * num_channals + position_index * bank_size + channal_pos_remainder) * map_height * map_width);
const T * pooled_features_start = pooled_features_grad + worker_index;
const int32_t * pooled_index_start = pooled_index + worker_index;
int32_t order = ldg(roi_order) < 0 ? 0 : ldg(roi_order) * 2;
T roi_y0 = static_cast<T>(ldg(roi_to_pool + (order++) % 8) * map_height);
T roi_x0 = static_cast<T>(ldg(roi_to_pool + (order++) % 8) * map_width);
T roi_y1 = static_cast<T>(ldg(roi_to_pool + (order++) % 8) * map_height);
T roi_x1 = static_cast<T>(ldg(roi_to_pool + (order++) % 8) * map_width);
T roi_y2 = static_cast<T>(ldg(roi_to_pool + (order++) % 8) * map_height);
T roi_x2 = static_cast<T>(ldg(roi_to_pool + (order++) % 8) * map_width);
T roi_y3 = static_cast<T>(ldg(roi_to_pool + (order++) % 8) * map_height);
T roi_x3 = static_cast<T>(ldg(roi_to_pool + (order++) % 8) * map_width);
double len0 = static_cast<double>((roi_y1 - roi_y0) * (roi_y1 - roi_y0) + (roi_x1 - roi_x0) * (roi_x1 - roi_x0));
double len1 = static_cast<double>((roi_y2 - roi_y1) * (roi_y2 - roi_y1) + (roi_x2 - roi_x1) * (roi_x2 - roi_x1));
double len2 = static_cast<double>((roi_y3 - roi_y2) * (roi_y3 - roi_y2) + (roi_x3 - roi_x2) * (roi_x3 - roi_x2));
double len3 = static_cast<double>((roi_y0 - roi_y3) * (roi_y0 - roi_y3) + (roi_x0 - roi_x3) * (roi_x0 - roi_x3));
double cross_len0 = static_cast<double>((roi_y0 - roi_y2) * (roi_y0 - roi_y2) + (roi_x0 - roi_x2) * (roi_x0 - roi_x2));
double cross_len1 = static_cast<double>((roi_y3 - roi_y1) * (roi_y3 - roi_y1) + (roi_x3 - roi_x1) * (roi_x3 - roi_x1));
order = ldg(roi_order) < 0 ? (len0 + len2 > len1 + len3 ? 1 : 0) : 0;
// discard degenerate ROIs whose sides have (near) zero length
if(len0 < std::numeric_limits<T>::min() || len1 < std::numeric_limits<T>::min() || len2 < std::numeric_limits<T>::min() || len3 < std::numeric_limits<T>::min()){
// convexity is not checked, for speed
//if(is_convex(roi_to_pool)){
continue;
}
T roi_y0_order = (order == 0) ? roi_y0 : roi_y1;
T roi_x0_order = (order == 0) ? roi_x0 : roi_x1;
T roi_y1_order = (order == 0) ? roi_y1 : roi_y2;
T roi_x1_order = (order == 0) ? roi_x1 : roi_x2;
T roi_y2_order = (order == 0) ? roi_y2 : roi_y3;
T roi_x2_order = (order == 0) ? roi_x2 : roi_x3;
T roi_y3_order = (order == 0) ? roi_y3 : roi_y0;
T roi_x3_order = (order == 0) ? roi_x3 : roi_x0;
T y_step_left = (roi_y3_order - roi_y0_order)/(grid_dim_height * 1.);
T y_step_right = (roi_y2_order - roi_y1_order)/(grid_dim_height * 1.);
T x_step_top = (roi_x1_order - roi_x0_order)/(grid_dim_width * 1.);
T x_step_bottom = (roi_x2_order - roi_x3_order)/(grid_dim_width * 1.);
T left_y1 = (roi_y0_order + row_index * y_step_left);
T right_y1 = (roi_y1_order + row_index * y_step_right);
T left_y2 = (roi_y0_order + (row_index + 1.) * y_step_left);
T right_y2 = (roi_y1_order + (row_index + 1.) * y_step_right);
T left_top_y = left_y1 + col_index * (right_y1 - left_y1)/(grid_dim_width);
T right_top_y = left_y1 + (col_index + 1.) * (right_y1 - left_y1)/(grid_dim_width);
T left_bottom_y = left_y2 + col_index * (right_y2 - left_y2)/(grid_dim_width);
T right_bottom_y = left_y2 + (col_index + 1.) * (right_y2 - left_y2)/(grid_dim_width);
T top_x1 = (roi_x0_order + col_index * x_step_top);
T bottom_x1 = (roi_x3_order + col_index * x_step_bottom);
T top_x2 = (roi_x0_order + (col_index + 1.) * x_step_top);
T bottom_x2 = (roi_x3_order + (col_index + 1.) * x_step_bottom);
T left_top_x = top_x1 + row_index * (bottom_x1 - top_x1)/(grid_dim_height);
T left_bottom_x = top_x1 + (row_index + 1.) * (bottom_x1 - top_x1)/(grid_dim_height);
T right_top_x = top_x2 + row_index * (bottom_x2 - top_x2)/(grid_dim_height);
T right_bottom_x = top_x2 + (row_index + 1.) * (bottom_x2 - top_x2)/(grid_dim_height);
float pool_bin_width = static_cast<float>(tf_max(tf_min(fabsf(right_top_x - left_top_x), fabsf(right_top_y - left_top_y)), tf_min(fabsf(right_bottom_x - left_bottom_x), fabsf(right_bottom_y - left_bottom_y))));
float pool_bin_height = static_cast<float>(tf_max(tf_min(fabsf(left_bottom_x - left_top_x), fabsf(left_bottom_y - left_top_y)), tf_min(fabsf(right_bottom_x - right_top_x), fabsf(right_bottom_y - right_top_y))));
int32_t num_elem_width = static_cast<int32_t>(pool_bin_width) + 1;
int32_t num_elem_height = static_cast<int32_t>(pool_bin_height) + 1;
T grid_y_step_left = (left_bottom_y - left_top_y)/(num_elem_height + 1.);
T grid_y_step_right = (right_bottom_y - right_top_y)/(num_elem_height + 1.);
T grid_x_step_top = (right_top_x - left_top_x)/(num_elem_width + 1.);
T grid_x_step_bottom = (right_bottom_x - left_bottom_x)/(num_elem_width + 1.);
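// Each pooling bin is sub-sampled on a num_elem_height x num_elem_width grid of points whose
// density follows the bin's size in pixels. Max pooling routes the gradient only to the
// sub-sample point recorded in pooled_index; average pooling spreads it evenly over all points.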
if(using_max_pool){
const int32_t pool_h = ldg(pooled_index_start) / num_elem_width;
const int32_t pool_w = ldg(pooled_index_start) % num_elem_width;
T col_to_pool = (left_top_x + (pool_w + 1.) * grid_x_step_top + left_bottom_x + (pool_w + 1.) * grid_x_step_bottom) / 2.;
T row_to_pool = (left_top_y + (pool_h + 1.) * grid_y_step_left + right_top_y + (pool_h + 1.) * grid_y_step_right) / 2.;
int32_t int_col_to_pool = static_cast<int32_t>(col_to_pool);
int32_t int_row_to_pool = static_cast<int32_t>(row_to_pool);
float float_col_to_pool = col_to_pool - int_col_to_pool;
float float_row_to_pool = row_to_pool - int_row_to_pool;
const T grad_in = ldg(pooled_features_start);
atomicAdd(grad_output_start + int_row_to_pool * map_width + int_col_to_pool, static_cast<T>((1. - float_col_to_pool) * (1. - float_row_to_pool) * grad_in));
atomicAdd(grad_output_start + tf_min(int_row_to_pool + 1, map_height - 1) * map_width + int_col_to_pool, static_cast<T>((1. - float_col_to_pool) * float_row_to_pool * grad_in));
atomicAdd(grad_output_start + int_row_to_pool * map_width + tf_min(int_col_to_pool + 1, map_width - 1), static_cast<T>(float_col_to_pool * (1. - float_row_to_pool) * grad_in));
atomicAdd(grad_output_start + tf_min(int_row_to_pool + 1, map_height - 1) * map_width + tf_min(int_col_to_pool + 1, map_width - 1), static_cast<T>(float_col_to_pool * float_row_to_pool * grad_in));
}else{
const T grad_in = ldg(pooled_features_start) / static_cast<T>(num_elem_width * num_elem_height);
for(int32_t pool_h = 0; pool_h < num_elem_height; ++pool_h){
for(int32_t pool_w = 0; pool_w < num_elem_width; ++pool_w){
T col_to_pool = (left_top_x + (pool_w + 1.) * grid_x_step_top + left_bottom_x + (pool_w + 1.) * grid_x_step_bottom) / 2.;
T row_to_pool = (left_top_y + (pool_h + 1.) * grid_y_step_left + right_top_y + (pool_h + 1.) * grid_y_step_right) / 2.;
int32_t int_col_to_pool = static_cast<int32_t>(col_to_pool);
int32_t int_row_to_pool = static_cast<int32_t>(row_to_pool);
float float_col_to_pool = col_to_pool - int_col_to_pool;
float float_row_to_pool = row_to_pool - int_row_to_pool;
atomicAdd(grad_output_start + int_row_to_pool * map_width + int_col_to_pool, static_cast<T>((1. - float_col_to_pool) * (1. - float_row_to_pool) * grad_in));
atomicAdd(grad_output_start + tf_min(int_row_to_pool + 1, map_height - 1) * map_width + int_col_to_pool, static_cast<T>((1. - float_col_to_pool) * float_row_to_pool * grad_in));
atomicAdd(grad_output_start + int_row_to_pool * map_width + tf_min(int_col_to_pool + 1, map_width - 1), static_cast<T>(float_col_to_pool * (1. - float_row_to_pool) * grad_in));
atomicAdd(grad_output_start + tf_min(int_row_to_pool + 1, map_height - 1) * map_width + tf_min(int_col_to_pool + 1, map_width - 1), static_cast<T>(float_col_to_pool * float_row_to_pool * grad_in));
}
}
}
}
}
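// Minimal sketch (an assumption for illustration, not part of the original op): the bilinear
// scatter performed above at each sampled point, factored into a standalone helper. grad_in is
// distributed to the four neighbouring cells with bilinear weights, clamped at the border.
__device__ inline void example_bilinear_scatter(float* grad, int map_height, int map_width,
                                                float row, float col, float grad_in) {
    const int r0 = static_cast<int>(row);
    const int c0 = static_cast<int>(col);
    const float fr = row - r0;                   // fractional offsets inside the cell
    const float fc = col - c0;
    const int r1 = min(r0 + 1, map_height - 1);  // clamp to the last row/column
    const int c1 = min(c0 + 1, map_width - 1);
    atomicAdd(grad + r0 * map_width + c0, (1.f - fc) * (1.f - fr) * grad_in);
    atomicAdd(grad + r1 * map_width + c0, (1.f - fc) * fr * grad_in);
    atomicAdd(grad + r0 * map_width + c1, fc * (1.f - fr) * grad_in);
    atomicAdd(grad + r1 * map_width + c1, fc * fr * grad_in);
}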
template <typename T>
void RotatedPSROIAlignGradFunctor<GPUDevice, T>::operator()(OpKernelContext* context, const GPUDevice& d, typename TTypes<T>::ConstFlat inputs, typename TTypes<T>::ConstFlat rois, typename TTypes<int32_t>::ConstFlat orders, const int32_t grid_dim_width, const int32_t grid_dim_height, typename TTypes<T>::ConstFlat pooled_features_grad, typename TTypes<int32_t>::ConstFlat pooled_index, typename TTypes<T>::Flat grad_output, KDimSize dim_info) {
int batch_size = 0;
int num_channals = 0;
int map_height = 0;
int map_width = 0;
int num_rois = 0;
bool using_max_pool = false;
std::tie(batch_size, num_channals, map_height, map_width, num_rois, using_max_pool) = dim_info;
CudaLaunchConfig config = GetCudaLaunchConfig(batch_size * num_rois * num_channals, d);
//grad_output = grad_output.setZero();
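// The gradient buffer must be zeroed before the backward kernel runs, since the kernel only
// accumulates into it with atomicAdd.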
SetZero <<<config.block_count, config.thread_per_block, 0, d.stream()>>> (batch_size * map_height * map_width * num_channals, grad_output.data());
RotatedPSROIAlignGradCudaKernel <<<config.block_count,
config.thread_per_block, 0, d.stream()>>> (config, inputs.data(), rois.data(), orders.data(), pooled_features_grad.data(), pooled_index.data(), grad_output.data(), grid_dim_width, grid_dim_height, batch_size, num_channals, map_height, map_width, num_rois, using_max_pool);
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
}
template struct RotatedPSROIAlignGradFunctor<GPUDevice, float>;
// #define DEFINE_GPU_SPECS(T) \
// template struct RotatedPSROIAlignFunctorGPU<T>;
// TF_CALL_GPU_NUMBER_TYPES(DEFINE_GPU_SPECS);
#endif // GOOGLE_CUDA
#include <cfloat>
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
#define RADIUS0 16
#define SMALL_KSIZE0 (RADIUS0 * 2 + 1)
#define RADIUS1 8
#define SMALL_KSIZE1 (RADIUS1 * 2 + 1)
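// The median kernels below select the median without sorting: "top" starts at a sentinel above
// any possible value, and every outer iteration re-scans the window for the largest value still
// below "top", counting its occurrences so that ranks covered by duplicates can be skipped.
// After median_index + 1 ranks (counted from the maximum downwards) have been accounted for,
// "top" holds the median and is written out.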
template <typename BorderInterpolation>
__global__
void medianC1SharedKernel(const uchar* src, int rows, int cols, int src_stride,
int median_index, int radius, uchar* dst,
int dst_stride, BorderInterpolation interpolation) {
__shared__ uchar data[kDimY0 + RADIUS0 * 2][(kDimX0 << 2) + RADIUS0 * 2];
int element_x = ((blockIdx.x << kShiftX0) + threadIdx.x) << 2;
int element_y = (blockIdx.y << kShiftY0) + threadIdx.y;
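// Cooperatively stage the tile plus its apron in shared memory: the row loop covers the vertical
// apron, each thread copies four consecutive pixels per covered row, and the first "radius"
// threads additionally fill the left and right borders.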
{
int index, y_index, row_index;
uchar* input;
uchar value0, value1, value2, value3;
y_index = threadIdx.y;
row_index = element_y - radius;
while (row_index < (int)(((blockIdx.y + 1) << kShiftY0) + radius) &&
row_index < rows + radius) {
index = interpolation(rows, radius, row_index);
input = (uchar*)((uchar*)src + index * src_stride);
if (threadIdx.x < radius) {
if (blockIdx.x == 0) {
index = interpolation(cols, radius, threadIdx.x - radius);
}
else {
index = (blockIdx.x << (kShiftX0 + 2)) + threadIdx.x - radius;
}
value0 = input[index];
data[y_index][threadIdx.x] = value0;
}
if (element_x < cols) {
value0 = input[element_x];
value1 = input[element_x + 1];
value2 = input[element_x + 2];
value3 = input[element_x + 3];
index = radius + (threadIdx.x << 2);
data[y_index][index] = value0;
data[y_index][index + 1] = value1;
data[y_index][index + 2] = value2;
data[y_index][index + 3] = value3;
}
if (threadIdx.x < radius) {
index = (cols - radius) >> (kShiftX0 + 2);
if (blockIdx.x >= index) {
if (blockIdx.x != gridDim.x - 1) {
index = ((blockIdx.x + 1) << (kShiftX0 + 2)) + threadIdx.x;
index = interpolation(cols, radius, index);
value0 = input[index];
index = radius + (kDimX0 << 2) + threadIdx.x;
data[y_index][index] = value0;
}
else {
index = interpolation(cols, radius, cols + threadIdx.x);
value0 = input[index];
index = cols - (blockIdx.x << (kShiftX0 + 2));
index += (radius + threadIdx.x);
data[y_index][index] = value0;
}
}
else {
index = ((blockIdx.x + 1) << (kShiftX0 + 2)) + threadIdx.x;
value0 = input[index];
index = radius + (kDimX0 << 2) + threadIdx.x;
data[y_index][index] = value0;
}
}
y_index += kDimY0;
row_index += kDimY0;
}
}
__syncthreads();
if (element_x >= cols || element_y >= rows) {
return;
}
int ksize = (radius << 1) + 1;
int threadIdx_x = threadIdx.x << 2;
bool unchecked0, unchecked1, unchecked2, unchecked3;
uint4 local_count = make_uint4(0, 0, 0, 0);
uint4 global_count = make_uint4(0, 0, 0, 0);
uchar4 value;
short4 max;
short4 top = make_short4(256, 256, 256, 256);
for (int index = 0; index <= median_index; index++) {
max = make_short4(-1, -1, -1, -1);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = 0; i < ksize; i++) {
for (int j = 0; j < ksize; j++) {
value.x = data[threadIdx.y + i][threadIdx_x + j];
value.y = data[threadIdx.y + i][threadIdx_x + j + 1];
value.z = data[threadIdx.y + i][threadIdx_x + j + 2];
value.w = data[threadIdx.y + i][threadIdx_x + j + 3];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != -1) top.x = max.x;
if (max.y != -1) top.y = max.y;
if (max.z != -1) top.z = max.z;
if (max.w != -1) top.w = max.w;
}
uchar* output = (uchar*)((uchar*)dst + element_y * dst_stride);
if (element_x < cols - 3) {
output[element_x] = saturateCast(top.x);
output[element_x + 1] = saturateCast(top.y);
output[element_x + 2] = saturateCast(top.z);
output[element_x + 3] = saturateCast(top.w);
}
else {
output[element_x] = saturateCast(top.x);
if (element_x < cols - 1) {
output[element_x + 1] = saturateCast(top.y);
}
if (element_x < cols - 2) {
output[element_x + 2] = saturateCast(top.z);
}
}
}
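// Minimal host-side sketch (illustrative only; the name and its use are assumptions, it is not
// part of ppl.cv) of the same selection idea on a single scalar window of n pixels,
// with 0 <= median_index < n:
static inline unsigned char example_select_median(const unsigned char* window, int n,
                                                  int median_index) {
  int top = 256;                          // sentinel above any uchar value
  for (int rank = 0; rank <= median_index;) {
    int max_below = -1;                   // largest value strictly below the current top
    int count = 0;
    for (int i = 0; i < n; ++i) {
      if (window[i] < top) {
        if (window[i] > max_below) {
          max_below = window[i];
          count = 1;
        }
        else if (window[i] == max_below) {
          ++count;
        }
      }
    }
    top = max_below;
    rank += count;                        // duplicates cover several ranks at once
  }
  return static_cast<unsigned char>(top);
}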
template <typename BorderInterpolation>
__global__
void medianC1SharedKernel(const float* src, int rows, int cols, int src_stride,
int median_index, int radius, float* dst,
int dst_stride, BorderInterpolation interpolation) {
__shared__ float data[kDimY0 + RADIUS0 * 2][(kDimX0 << 2) + RADIUS0 * 2];
int element_x = ((blockIdx.x << kShiftX0) + threadIdx.x) << 2;
int element_y = (blockIdx.y << kShiftY0) + threadIdx.y;
{
int index, y_index, row_index;
float* input;
float value0, value1, value2, value3;
y_index = threadIdx.y;
row_index = element_y - radius;
while (row_index < (int)(((blockIdx.y + 1) << kShiftY0) + radius) &&
row_index < rows + radius) {
index = interpolation(rows, radius, row_index);
input = (float*)((uchar*)src + index * src_stride);
if (threadIdx.x < radius) {
if (blockIdx.x == 0) {
index = interpolation(cols, radius, threadIdx.x - radius);
}
else {
index = (blockIdx.x << (kShiftX0 + 2)) + threadIdx.x - radius;
}
value0 = input[index];
data[y_index][threadIdx.x] = value0;
}
if (element_x < cols) {
value0 = input[element_x];
value1 = input[element_x + 1];
value2 = input[element_x + 2];
value3 = input[element_x + 3];
index = radius + (threadIdx.x << 2);
data[y_index][index] = value0;
data[y_index][index + 1] = value1;
data[y_index][index + 2] = value2;
data[y_index][index + 3] = value3;
}
if (threadIdx.x < radius) {
index = (cols - radius) >> (kShiftX0 + 2);
if (blockIdx.x >= index) {
if (blockIdx.x != gridDim.x - 1) {
index = ((blockIdx.x + 1) << (kShiftX0 + 2)) + threadIdx.x;
index = interpolation(cols, radius, index);
value0 = input[index];
index = radius + (kDimX0 << 2) + threadIdx.x;
data[y_index][index] = value0;
}
else {
index = interpolation(cols, radius, cols + threadIdx.x);
value0 = input[index];
index = cols - (blockIdx.x << (kShiftX0 + 2));
index += (radius + threadIdx.x);
data[y_index][index] = value0;
}
}
else {
index = ((blockIdx.x + 1) << (kShiftX0 + 2)) + threadIdx.x;
value0 = input[index];
index = radius + (kDimX0 << 2) + threadIdx.x;
data[y_index][index] = value0;
}
}
y_index += kDimY0;
row_index += kDimY0;
}
}
__syncthreads();
if (element_x >= cols || element_y >= rows) {
return;
}
int ksize = (radius << 1) + 1;
int threadIdx_x = threadIdx.x << 2;
bool unchecked0, unchecked1, unchecked2, unchecked3;
uint4 local_count = make_uint4(0, 0, 0, 0);
uint4 global_count = make_uint4(0, 0, 0, 0);
float4 value;
float4 max;
float4 top = make_float4(FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX);
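// FLT_MAX means "no upper bound yet"; FLT_MIN (the smallest positive normal float, not the most
// negative value) marks "no candidate found in this pass".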
for (int index = 0; index <= median_index; index++) {
max = make_float4(FLT_MIN, FLT_MIN, FLT_MIN, FLT_MIN);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = 0; i < ksize; i++) {
for (int j = 0; j < ksize; j++) {
value.x = data[threadIdx.y + i][threadIdx_x + j];
value.y = data[threadIdx.y + i][threadIdx_x + j + 1];
value.z = data[threadIdx.y + i][threadIdx_x + j + 2];
value.w = data[threadIdx.y + i][threadIdx_x + j + 3];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != FLT_MIN) top.x = max.x;
if (max.y != FLT_MIN) top.y = max.y;
if (max.z != FLT_MIN) top.z = max.z;
if (max.w != FLT_MIN) top.w = max.w;
}
float* output = (float*)((uchar*)dst + element_y * dst_stride);
if (element_x < cols - 3) {
output[element_x] = top.x;
output[element_x + 1] = top.y;
output[element_x + 2] = top.z;
output[element_x + 3] = top.w;
}
else {
output[element_x] = top.x;
if (element_x < cols - 1) {
output[element_x + 1] = top.y;
}
if (element_x < cols - 2) {
output[element_x + 2] = top.z;
}
}
}
template <typename BorderInterpolation>
__global__
void medianC3SharedKernel(const uchar* src, int rows, int cols, int src_stride,
int median_index, int radius, uchar* dst,
int dst_stride, BorderInterpolation interpolation) {
__shared__ uchar3 data[kDimY0 + RADIUS1 * 2][kDimX0 + RADIUS1 * 2];
int element_x = (blockIdx.x << kShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kShiftY0) + threadIdx.y;
{
int index, y_index, row_index, col_index;
uchar3* input;
y_index = threadIdx.y;
row_index = element_y - radius;
while (row_index < (int)(((blockIdx.y + 1) << kShiftY0) + radius) &&
row_index < rows + radius) {
index = interpolation(rows, radius, row_index);
input = (uchar3*)((uchar*)src + index * src_stride);
int x_index = threadIdx.x;
col_index = element_x - radius;
while (col_index < (int)(((blockIdx.x + 1) << kShiftX0) + radius) &&
col_index < cols + radius) {
index = interpolation(cols, radius, col_index);
data[y_index][x_index] = input[index];
x_index += kDimX0;
col_index += kDimX0;
}
y_index += kDimY0;
row_index += kDimY0;
}
}
__syncthreads();
if (element_x >= cols || element_y >= rows) {
return;
}
int ksize = (radius << 1) + 1;
bool unchecked0, unchecked1, unchecked2;
uint3 local_count = make_uint3(0, 0, 0);
uint3 global_count = make_uint3(0, 0, 0);
uchar3 value;
short3 max;
short3 top = make_short3(256, 256, 256);
for (int index = 0; index <= median_index; index++) {
max = make_short3(-1, -1, -1);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
for (int i = 0; i < ksize; i++) {
for (int j = 0; j < ksize; j++) {
value = data[threadIdx.y + i][threadIdx.x + j];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
if (max.x != -1) top.x = max.x;
if (max.y != -1) top.y = max.y;
if (max.z != -1) top.z = max.z;
}
uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride);
output[element_x] = saturateCastVector<uchar3, short3>(top);
}
template <typename BorderInterpolation>
__global__
void medianC3SharedKernel(const float* src, int rows, int cols, int src_stride,
int median_index, int radius, float* dst,
int dst_stride, BorderInterpolation interpolation) {
__shared__ float3 data[kDimY0 + RADIUS1 * 2][kDimX0 + RADIUS1 * 2];
int element_x = (blockIdx.x << kShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kShiftY0) + threadIdx.y;
{
int index, y_index, row_index, col_index;
float3* input;
y_index = threadIdx.y;
row_index = element_y - radius;
while (row_index < (int)(((blockIdx.y + 1) << kShiftY0) + radius) &&
row_index < rows + radius) {
index = interpolation(rows, radius, row_index);
input = (float3*)((uchar*)src + index * src_stride);
int x_index = threadIdx.x;
col_index = element_x - radius;
while (col_index < (int)(((blockIdx.x + 1) << kShiftX0) + radius) &&
col_index < cols + radius) {
index = interpolation(cols, radius, col_index);
data[y_index][x_index] = input[index];
x_index += kDimX0;
col_index += kDimX0;
}
y_index += kDimY0;
row_index += kDimY0;
}
}
__syncthreads();
if (element_x >= cols || element_y >= rows) {
return;
}
int ksize = (radius << 1) + 1;
bool unchecked0, unchecked1, unchecked2;
uint3 local_count = make_uint3(0, 0, 0);
uint3 global_count = make_uint3(0, 0, 0);
float3 value;
float3 max;
float3 top = make_float3(FLT_MAX, FLT_MAX, FLT_MAX);
for (int index = 0; index <= median_index; index++) {
max = make_float3(FLT_MIN, FLT_MIN, FLT_MIN);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
for (int i = 0; i < ksize; i++) {
for (int j = 0; j < ksize; j++) {
value = data[threadIdx.y + i][threadIdx.x + j];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
if (max.x != FLT_MIN) top.x = max.x;
if (max.y != FLT_MIN) top.y = max.y;
if (max.z != FLT_MIN) top.z = max.z;
}
float3* output = (float3*)((uchar*)dst + element_y * dst_stride);
output[element_x] = top;
}
template <typename BorderInterpolation>
__global__
void medianC4SharedKernel(const uchar* src, int rows, int cols, int src_stride,
int median_index, int radius, uchar* dst,
int dst_stride, BorderInterpolation interpolation) {
__shared__ uchar4 data[kDimY0 + RADIUS1 * 2][kDimX0 + RADIUS1 * 2];
int element_x = (blockIdx.x << kShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kShiftY0) + threadIdx.y;
{
int index, y_index, row_index, col_index;
uchar4* input;
y_index = threadIdx.y;
row_index = element_y - radius;
while (row_index < (int)(((blockIdx.y + 1) << kShiftY0) + radius) &&
row_index < rows + radius) {
index = interpolation(rows, radius, row_index);
input = (uchar4*)((uchar*)src + index * src_stride);
int x_index = threadIdx.x;
col_index = element_x - radius;
while (col_index < (int)(((blockIdx.x + 1) << kShiftX0) + radius) &&
col_index < cols + radius) {
index = interpolation(cols, radius, col_index);
data[y_index][x_index] = input[index];
x_index += kDimX0;
col_index += kDimX0;
}
y_index += kDimY0;
row_index += kDimY0;
}
}
__syncthreads();
if (element_x >= cols || element_y >= rows) {
return;
}
int ksize = (radius << 1) + 1;
bool unchecked0, unchecked1, unchecked2, unchecked3;
uint4 local_count = make_uint4(0, 0, 0, 0);
uint4 global_count = make_uint4(0, 0, 0, 0);
uchar4 value;
short4 max;
short4 top = make_short4(256, 256, 256, 256);
for (int index = 0; index <= median_index; index++) {
max = make_short4(-1, -1, -1, -1);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = 0; i < ksize; i++) {
for (int j = 0; j < ksize; j++) {
value = data[threadIdx.y + i][threadIdx.x + j];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != -1) top.x = max.x;
if (max.y != -1) top.y = max.y;
if (max.z != -1) top.z = max.z;
if (max.w != -1) top.w = max.w;
}
uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride);
output[element_x] = saturateCastVector<uchar4, short4>(top);
}
template <typename BorderInterpolation>
__global__
void medianC4SharedKernel(const float* src, int rows, int cols, int src_stride,
int median_index, int radius, float* dst,
int dst_stride, BorderInterpolation interpolation) {
__shared__ float4 data[kDimY0 + RADIUS1 * 2][kDimX0 + RADIUS1 * 2];
int element_x = (blockIdx.x << kShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kShiftY0) + threadIdx.y;
{
int index, y_index, row_index, col_index;
float4* input;
y_index = threadIdx.y;
row_index = element_y - radius;
while (row_index < (int)(((blockIdx.y + 1) << kShiftY0) + radius) &&
row_index < rows + radius) {
index = interpolation(rows, radius, row_index);
input = (float4*)((uchar*)src + index * src_stride);
int x_index = threadIdx.x;
col_index = element_x - radius;
while (col_index < (int)(((blockIdx.x + 1) << kShiftX0) + radius) &&
col_index < cols + radius) {
index = interpolation(cols, radius, col_index);
data[y_index][x_index] = input[index];
x_index += kDimX0;
col_index += kDimX0;
}
y_index += kDimY0;
row_index += kDimY0;
}
}
__syncthreads();
if (element_x >= cols || element_y >= rows) {
return;
}
int ksize = (radius << 1) + 1;
bool unchecked0, unchecked1, unchecked2, unchecked3;
uint4 local_count = make_uint4(0, 0, 0, 0);
uint4 global_count = make_uint4(0, 0, 0, 0);
float4 value;
float4 max;
float4 top = make_float4(FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX);
for (int index = 0; index <= median_index; index++) {
max = make_float4(FLT_MIN, FLT_MIN, FLT_MIN, FLT_MIN);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = 0; i < ksize; i++) {
for (int j = 0; j < ksize; j++) {
value = data[threadIdx.y + i][threadIdx.x + j];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != FLT_MIN) top.x = max.x;
if (max.y != FLT_MIN) top.y = max.y;
if (max.z != FLT_MIN) top.z = max.z;
if (max.w != FLT_MIN) top.w = max.w;
}
float4* output = (float4*)((uchar*)dst + element_y * dst_stride);
output[element_x] = top;
}
template <typename BorderInterpolation>
__global__
void medianC1Kernel(const uchar* src, int rows, int cols, int src_stride,
int median_index, int radius, uchar* dst, int dst_stride,
BorderInterpolation interpolation) {
int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 2;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_x >= cols || element_y >= rows) {
return;
}
int origin_x = element_x - radius;
int origin_y = element_y - radius;
int top_x = element_x + radius;
int top_y = element_y + radius;
int data_index;
bool unchecked0, unchecked1, unchecked2, unchecked3;
uint4 local_count = make_uint4(0, 0, 0, 0);
uint4 global_count = make_uint4(0, 0, 0, 0);
uchar* input;
uchar4 value;
short4 max;
short4 top = make_short4(256, 256, 256, 256);
bool isnt_border_block = true;
data_index = radius >> (kBlockShiftX0 + 2);
if (blockIdx.x <= data_index) isnt_border_block = false;
data_index = (cols - radius) >> (kBlockShiftX0 + 2);
if (blockIdx.x >= data_index) isnt_border_block = false;
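// Blocks that lie entirely inside the image read the window directly; border blocks go through
// the interpolation functor for every sampled column.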
if (isnt_border_block) {
for (int index = 0; index <= median_index; index++) {
max = make_short4(-1, -1, -1, -1);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (uchar*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
value.x = input[j];
value.y = input[j + 1];
value.z = input[j + 2];
value.w = input[j + 3];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != -1) top.x = max.x;
if (max.y != -1) top.y = max.y;
if (max.z != -1) top.z = max.z;
if (max.w != -1) top.w = max.w;
}
}
else {
for (int index = 0; index <= median_index; index++) {
max = make_short4(-1, -1, -1, -1);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (uchar*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
data_index = interpolation(cols, radius, j);
value.x = input[data_index];
data_index = interpolation(cols, radius, j + 1);
value.y = input[data_index];
data_index = interpolation(cols, radius, j + 2);
value.z = input[data_index];
data_index = interpolation(cols, radius, j + 3);
value.w = input[data_index];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != -1) top.x = max.x;
if (max.y != -1) top.y = max.y;
if (max.z != -1) top.z = max.z;
if (max.w != -1) top.w = max.w;
}
}
uchar* output = (uchar*)((uchar*)dst + element_y * dst_stride);
if (element_x < cols - 3) {
output[element_x] = saturateCast(top.x);
output[element_x + 1] = saturateCast(top.y);
output[element_x + 2] = saturateCast(top.z);
output[element_x + 3] = saturateCast(top.w);
}
else {
output[element_x] = saturateCast(top.x);
if (element_x < cols - 1) {
output[element_x + 1] = saturateCast(top.y);
}
if (element_x < cols - 2) {
output[element_x + 2] = saturateCast(top.z);
}
}
}
template <typename BorderInterpolation>
__global__
void medianC1Kernel(const float* src, int rows, int cols, int src_stride,
int median_index, int radius, float* dst, int dst_stride,
BorderInterpolation interpolation) {
int element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 2;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_x >= cols || element_y >= rows) {
return;
}
int origin_x = element_x - radius;
int origin_y = element_y - radius;
int top_x = element_x + radius;
int top_y = element_y + radius;
int data_index;
bool unchecked0, unchecked1, unchecked2, unchecked3;
uint4 local_count = make_uint4(0, 0, 0, 0);
uint4 global_count = make_uint4(0, 0, 0, 0);
float* input;
float4 value;
float4 max;
float4 top = make_float4(FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX);
bool isnt_border_block = true;
data_index = radius >> (kBlockShiftX0 + 2);
if (blockIdx.x <= data_index) isnt_border_block = false;
data_index = (cols - radius) >> (kBlockShiftX0 + 2);
if (blockIdx.x >= data_index) isnt_border_block = false;
if (isnt_border_block) {
for (int index = 0; index <= median_index; index++) {
max = make_float4(FLT_MIN, FLT_MIN, FLT_MIN, FLT_MIN);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (float*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
value.x = input[j];
value.y = input[j + 1];
value.z = input[j + 2];
value.w = input[j + 3];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != FLT_MIN) top.x = max.x;
if (max.y != FLT_MIN) top.y = max.y;
if (max.z != FLT_MIN) top.z = max.z;
if (max.w != FLT_MIN) top.w = max.w;
}
}
else {
for (int index = 0; index <= median_index; index++) {
max = make_float4(FLT_MIN, FLT_MIN, FLT_MIN, FLT_MIN);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (float*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
data_index = interpolation(cols, radius, j);
value.x = input[data_index];
data_index = interpolation(cols, radius, j + 1);
value.y = input[data_index];
data_index = interpolation(cols, radius, j + 2);
value.z = input[data_index];
data_index = interpolation(cols, radius, j + 3);
value.w = input[data_index];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != FLT_MIN) top.x = max.x;
if (max.y != FLT_MIN) top.y = max.y;
if (max.z != FLT_MIN) top.z = max.z;
if (max.w != FLT_MIN) top.w = max.w;
}
}
float* output = (float*)((uchar*)dst + element_y * dst_stride);
if (element_x < cols - 3) {
output[element_x] = top.x;
output[element_x + 1] = top.y;
output[element_x + 2] = top.z;
output[element_x + 3] = top.w;
}
else {
output[element_x] = top.x;
if (element_x < cols - 1) {
output[element_x + 1] = top.y;
}
if (element_x < cols - 2) {
output[element_x + 2] = top.z;
}
}
}
template <typename BorderInterpolation>
__global__
void medianC3Kernel(const uchar* src, int rows, int cols, int src_stride,
int median_index, int radius, uchar* dst, int dst_stride,
BorderInterpolation interpolation) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_x >= cols || element_y >= rows) {
return;
}
int origin_x = element_x - radius;
int origin_y = element_y - radius;
int top_x = element_x + radius;
int top_y = element_y + radius;
int data_index;
bool unchecked0, unchecked1, unchecked2;
uint3 local_count = make_uint3(0, 0, 0);
uint3 global_count = make_uint3(0, 0, 0);
uchar3* input;
uchar3 value;
short3 max;
short3 top = make_short3(256, 256, 256);
bool isnt_border_block = true;
data_index = radius >> (kBlockShiftX0 + 2);
if (blockIdx.x <= data_index) isnt_border_block = false;
data_index = (cols - radius) >> (kBlockShiftX0 + 2);
if (blockIdx.x >= data_index) isnt_border_block = false;
if (isnt_border_block) {
for (int index = 0; index <= median_index; index++) {
max = make_short3(-1, -1, -1);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (uchar3*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
value = input[j];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
if (max.x != -1) top.x = max.x;
if (max.y != -1) top.y = max.y;
if (max.z != -1) top.z = max.z;
}
}
else {
for (int index = 0; index <= median_index; index++) {
max = make_short3(-1, -1, -1);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (uchar3*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
data_index = interpolation(cols, radius, j);
value = input[data_index];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
if (max.x != -1) top.x = max.x;
if (max.y != -1) top.y = max.y;
if (max.z != -1) top.z = max.z;
}
}
uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride);
output[element_x] = saturateCastVector<uchar3, short3>(top);
}
template <typename BorderInterpolation>
__global__
void medianC3Kernel(const float* src, int rows, int cols, int src_stride,
int median_index, int radius, float* dst, int dst_stride,
BorderInterpolation interpolation) {
int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_x >= cols || element_y >= rows) {
return;
}
int origin_x = element_x - radius;
int origin_y = element_y - radius;
int top_x = element_x + radius;
int top_y = element_y + radius;
int data_index;
bool unchecked0, unchecked1, unchecked2;
uint3 local_count = make_uint3(0, 0, 0);
uint3 global_count = make_uint3(0, 0, 0);
float3* input;
float3 value;
float3 max;
float3 top = make_float3(FLT_MAX, FLT_MAX, FLT_MAX);
bool isnt_border_block = true;
data_index = radius >> (kBlockShiftX0 + 2);
if (blockIdx.x <= data_index) isnt_border_block = false;
data_index = (cols - radius) >> (kBlockShiftX0 + 2);
if (blockIdx.x >= data_index) isnt_border_block = false;
if (isnt_border_block) {
for (int index = 0; index <= median_index; index++) {
max = make_float3(FLT_MIN, FLT_MIN, FLT_MIN);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (float3*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
value = input[j];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
if (max.x != FLT_MIN) top.x = max.x;
if (max.y != FLT_MIN) top.y = max.y;
if (max.z != FLT_MIN) top.z = max.z;
}
}
else {
for (int index = 0; index <= median_index; index++) {
max = make_float3(FLT_MIN, FLT_MIN, FLT_MIN);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (float3*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
data_index = interpolation(cols, radius, j);
value = input[data_index];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
if (max.x != FLT_MIN) top.x = max.x;
if (max.y != FLT_MIN) top.y = max.y;
if (max.z != FLT_MIN) top.z = max.z;
}
}
float3* output = (float3*)((uchar*)dst + element_y * dst_stride);
output[element_x] = top;
}
template <typename BorderInterpolation>
__global__
void medianC4Kernel(const uchar* src, int rows, int cols, int src_stride,
int median_index, int radius, uchar* dst, int dst_stride,
BorderInterpolation interpolation) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_x >= cols || element_y >= rows) {
return;
}
int origin_x = element_x - radius;
int origin_y = element_y - radius;
int top_x = element_x + radius;
int top_y = element_y + radius;
int data_index;
bool unchecked0, unchecked1, unchecked2, unchecked3;
uint4 local_count = make_uint4(0, 0, 0, 0);
uint4 global_count = make_uint4(0, 0, 0, 0);
uchar4* input;
uchar4 value;
short4 max;
short4 top = make_short4(256, 256, 256, 256);
bool isnt_border_block = true;
data_index = radius >> (kBlockShiftX0 + 2);
if (blockIdx.x <= data_index) isnt_border_block = false;
data_index = (cols - radius) >> (kBlockShiftX0 + 2);
if (blockIdx.x >= data_index) isnt_border_block = false;
if (isnt_border_block) {
for (int index = 0; index <= median_index; index++) {
max = make_short4(-1, -1, -1, -1);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (uchar4*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
value = input[j];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != -1) top.x = max.x;
if (max.y != -1) top.y = max.y;
if (max.z != -1) top.z = max.z;
if (max.w != -1) top.w = max.w;
}
}
else {
for (int index = 0; index <= median_index; index++) {
max = make_short4(-1, -1, -1, -1);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (uchar4*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
data_index = interpolation(cols, radius, j);
value = input[data_index];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != -1) top.x = max.x;
if (max.y != -1) top.y = max.y;
if (max.z != -1) top.z = max.z;
if (max.w != -1) top.w = max.w;
}
}
uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride);
output[element_x] = saturateCastVector<uchar4, short4>(top);
}
template <typename BorderInterpolation>
__global__
void medianC4Kernel(const float* src, int rows, int cols, int src_stride,
int median_index, int radius, float* dst, int dst_stride,
BorderInterpolation interpolation) {
int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_x >= cols || element_y >= rows) {
return;
}
int origin_x = element_x - radius;
int origin_y = element_y - radius;
int top_x = element_x + radius;
int top_y = element_y + radius;
int data_index;
bool unchecked0, unchecked1, unchecked2, unchecked3;
uint4 local_count = make_uint4(0, 0, 0, 0);
uint4 global_count = make_uint4(0, 0, 0, 0);
float4* input;
float4 value;
float4 max;
float4 top = make_float4(FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX);
bool isnt_border_block = true;
data_index = radius >> (kBlockShiftX0 + 2);
if (blockIdx.x <= data_index) isnt_border_block = false;
data_index = (cols - radius) >> (kBlockShiftX0 + 2);
if (blockIdx.x >= data_index) isnt_border_block = false;
if (isnt_border_block) {
for (int index = 0; index <= median_index; index++) {
max = make_float4(FLT_MIN, FLT_MIN, FLT_MIN, FLT_MIN);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (float4*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
value = input[j];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != FLT_MIN) top.x = max.x;
if (max.y != FLT_MIN) top.y = max.y;
if (max.z != FLT_MIN) top.z = max.z;
if (max.w != FLT_MIN) top.w = max.w;
}
}
else {
for (int index = 0; index <= median_index; index++) {
max = make_float4(FLT_MIN, FLT_MIN, FLT_MIN, FLT_MIN);
unchecked0 = true;
unchecked1 = true;
unchecked2 = true;
unchecked3 = true;
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius, i);
input = (float4*)((uchar*)src + data_index * src_stride);
for (int j = origin_x; j <= top_x; j++) {
data_index = interpolation(cols, radius, j);
value = input[data_index];
if ((!unchecked0) && max.x == value.x) unchecked0 = false;
if ((!unchecked1) && max.y == value.y) unchecked1 = false;
if ((!unchecked2) && max.z == value.z) unchecked2 = false;
if ((!unchecked3) && max.w == value.w) unchecked3 = false;
if (unchecked0 && max.x == value.x) local_count.x++;
if (unchecked1 && max.y == value.y) local_count.y++;
if (unchecked2 && max.z == value.z) local_count.z++;
if (unchecked3 && max.w == value.w) local_count.w++;
if (index + global_count.x <= median_index && max.x < value.x &&
value.x < top.x) {
max.x = value.x;
local_count.x = 0;
}
if (index + global_count.y <= median_index && max.y < value.y &&
value.y < top.y) {
max.y = value.y;
local_count.y = 0;
}
if (index + global_count.z <= median_index && max.z < value.z &&
value.z < top.z) {
max.z = value.z;
local_count.z = 0;
}
if (index + global_count.w <= median_index && max.w < value.w &&
value.w < top.w) {
max.w = value.w;
local_count.w = 0;
}
}
}
global_count.x += local_count.x;
global_count.y += local_count.y;
global_count.z += local_count.z;
global_count.w += local_count.w;
if (max.x != FLT_MIN) top.x = max.x;
if (max.y != FLT_MIN) top.y = max.y;
if (max.z != FLT_MIN) top.z = max.z;
if (max.w != FLT_MIN) top.w = max.w;
}
}
float4* output = (float4*)((uchar*)dst + element_y * dst_stride);
output[element_x] = top;
}
#define RUN_CHANNEL1_SMALL_KERNELS(Interpolation) \
Interpolation interpolation; \
medianC1SharedKernel<Interpolation><<<grid, block, 0, stream>>>(src, rows, \
cols, src_stride, median_index, radius, dst, dst_stride, interpolation);
#define RUN_CHANNELN_SMALL_KERNELS(Interpolation) \
Interpolation interpolation; \
if (channels == 3) { \
medianC3SharedKernel<Interpolation><<<grid, block, 0, stream>>>(src, rows, \
cols, src_stride, median_index, radius, dst, dst_stride, interpolation); \
} \
else { \
medianC4SharedKernel<Interpolation><<<grid, block, 0, stream>>>(src, rows, \
cols, src_stride, median_index, radius, dst, dst_stride, interpolation); \
}
#define RUN_KERNELS0(grid_x, Interpolation) \
Interpolation interpolation; \
if (channels == 1) { \
grid0.x = grid_x; \
medianC1Kernel<Interpolation><<<grid0, block0, 0, stream>>>(src, rows, cols, \
src_stride, median_index, radius, dst, dst_stride, interpolation); \
} \
else if (channels == 3) { \
medianC3Kernel<Interpolation><<<grid0, block0, 0, stream>>>(src, rows, cols, \
src_stride, median_index, radius, dst, dst_stride, interpolation); \
} \
else { \
medianC4Kernel<Interpolation><<<grid0, block0, 0, stream>>>(src, rows, cols, \
src_stride, median_index, radius, dst, dst_stride, interpolation); \
}
#define RUN_KERNELS1(grid_x, Interpolation) \
Interpolation interpolation; \
if (channels == 1) { \
grid0.x = grid_x; \
medianC1Kernel<Interpolation><<<grid0, block0, 0, stream>>>(src, rows, cols, \
src_stride, median_index, radius, dst, dst_stride, interpolation); \
} \
else if (channels == 3) { \
medianC3Kernel<Interpolation><<<grid0, block0, 0, stream>>>(src, rows, cols, \
src_stride, median_index, radius, dst, dst_stride, interpolation); \
} \
else { \
medianC4Kernel<Interpolation><<<grid0, block0, 0, stream>>>(src, rows, cols, \
src_stride, median_index, radius, dst, dst_stride, interpolation); \
}
RetCode medainblur(const uchar* src, int rows, int cols, int channels,
int src_stride, uchar* dst, int dst_stride, int ksize,
BorderType border_type, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar));
PPL_ASSERT(ksize > 1);
PPL_ASSERT((ksize & 1) == 1);
PPL_ASSERT(border_type == BORDER_TYPE_REPLICATE ||
border_type == BORDER_TYPE_REFLECT ||
border_type == BORDER_TYPE_REFLECT_101 ||
border_type == BORDER_TYPE_DEFAULT);
uint radius = ksize >> 1;
uint median_index = ksize * ksize >> 1;  // 0-based rank of the median within the ksize x ksize window
cudaError_t code;
if (ksize <= SMALL_KSIZE0 && channels == 1) {
dim3 block, grid;
block.x = kDimX0;
block.y = kDimY0;
grid.x = divideUp(divideUp(cols, 4, 2), kDimX0, kShiftX0);
grid.y = divideUp(rows, kDimY0, kShiftY0);
if (border_type == BORDER_TYPE_REPLICATE) {
RUN_CHANNEL1_SMALL_KERNELS(ReplicateBorder);
}
else if (border_type == BORDER_TYPE_REFLECT) {
RUN_CHANNEL1_SMALL_KERNELS(ReflectBorder);
}
else {
RUN_CHANNEL1_SMALL_KERNELS(Reflect101Border);
}
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
if (ksize <= SMALL_KSIZE1 && (channels == 3 || channels == 4)) {
dim3 block, grid;
block.x = kDimX0;
block.y = kDimY0;
grid.x = divideUp(cols, kDimX0, kShiftX0);
grid.y = divideUp(rows, kDimY0, kShiftY0);
if (border_type == BORDER_TYPE_REPLICATE) {
RUN_CHANNELN_SMALL_KERNELS(ReplicateBorder);
}
else if (border_type == BORDER_TYPE_REFLECT) {
RUN_CHANNELN_SMALL_KERNELS(ReflectBorder);
}
else {
RUN_CHANNELN_SMALL_KERNELS(Reflect101Border);
}
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
dim3 block0, grid0;
block0.x = kBlockDimX0;
block0.y = kBlockDimY0;
grid0.x = divideUp(cols, kBlockDimX0, kBlockShiftX0);
grid0.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
int grid_x = divideUp(divideUp(cols, 4, 2), kBlockDimX0, kBlockShiftX0);
if (border_type == BORDER_TYPE_REPLICATE) {
RUN_KERNELS0(grid_x, ReplicateBorder);
}
else if (border_type == BORDER_TYPE_REFLECT) {
RUN_KERNELS0(grid_x, ReflectBorder);
}
else {
RUN_KERNELS0(grid_x, Reflect101Border);
}
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
RetCode medainblur(const float* src, int rows, int cols, int channels,
int src_stride, float* dst, int dst_stride, int ksize,
BorderType border_type, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(ksize > 1);
PPL_ASSERT((ksize & 1) == 1);
PPL_ASSERT(border_type == BORDER_TYPE_REPLICATE ||
border_type == BORDER_TYPE_REFLECT ||
border_type == BORDER_TYPE_REFLECT_101 ||
border_type == BORDER_TYPE_DEFAULT);
uint radius = ksize >> 1;
uint median_index = ksize * ksize >> 1;
cudaError_t code;
if (ksize <= SMALL_KSIZE0 && channels == 1) {
dim3 block, grid;
block.x = kDimX0;
block.y = kDimY0;
grid.x = divideUp(divideUp(cols, 4, 2), kDimX0, kShiftX0);
grid.y = divideUp(rows, kDimY0, kShiftY0);
if (border_type == BORDER_TYPE_REPLICATE) {
RUN_CHANNEL1_SMALL_KERNELS(ReplicateBorder);
}
else if (border_type == BORDER_TYPE_REFLECT) {
RUN_CHANNEL1_SMALL_KERNELS(ReflectBorder);
}
else {
RUN_CHANNEL1_SMALL_KERNELS(Reflect101Border);
}
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
if (ksize <= SMALL_KSIZE1 && (channels == 3 || channels == 4)) {
dim3 block, grid;
block.x = kDimX0;
block.y = kDimY0;
grid.x = divideUp(cols, kDimX0, kShiftX0);
grid.y = divideUp(rows, kDimY0, kShiftY0);
if (border_type == BORDER_TYPE_REPLICATE) {
RUN_CHANNELN_SMALL_KERNELS(ReplicateBorder);
}
else if (border_type == BORDER_TYPE_REFLECT) {
RUN_CHANNELN_SMALL_KERNELS(ReflectBorder);
}
else {
RUN_CHANNELN_SMALL_KERNELS(Reflect101Border);
}
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
dim3 block0, grid0;
block0.x = kBlockDimX1;
block0.y = kBlockDimY1;
grid0.x = divideUp(cols, kBlockDimX1, kBlockShiftX1);
grid0.y = divideUp(rows, kBlockDimY1, kBlockShiftY1);
int grid_x = divideUp(divideUp(cols, 4, 2), kBlockDimX1, kBlockShiftX1);
if (border_type == BORDER_TYPE_REPLICATE) {
RUN_KERNELS1(grid_x, ReplicateBorder);
}
else if (border_type == BORDER_TYPE_REFLECT) {
RUN_KERNELS1(grid_x, ReflectBorder);
}
else {
RUN_KERNELS1(grid_x, Reflect101Border);
}
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode MedianBlur<uchar, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int ksize,
BorderType border_type) {
RetCode code = medainblur(inData, height, width, 1, inWidthStride, outData,
outWidthStride, ksize, border_type, stream);
return code;
}
template <>
RetCode MedianBlur<uchar, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int ksize,
BorderType border_type) {
RetCode code = medainblur(inData, height, width, 3, inWidthStride, outData,
outWidthStride, ksize, border_type, stream);
return code;
}
template <>
RetCode MedianBlur<uchar, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int outWidthStride,
uchar* outData,
int ksize,
BorderType border_type) {
RetCode code = medainblur(inData, height, width, 4, inWidthStride, outData,
outWidthStride, ksize, border_type, stream);
return code;
}
template <>
RetCode MedianBlur<float, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int ksize,
BorderType border_type) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = medainblur(inData, height, width, 1, inWidthStride, outData,
outWidthStride, ksize, border_type, stream);
return code;
}
template <>
RetCode MedianBlur<float, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int ksize,
BorderType border_type) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = medainblur(inData, height, width, 3, inWidthStride, outData,
outWidthStride, ksize, border_type, stream);
return code;
}
template <>
RetCode MedianBlur<float, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int outWidthStride,
float* outData,
int ksize,
BorderType border_type) {
inWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = medainblur(inData, height, width, 4, inWidthStride, outData,
outWidthStride, ksize, border_type, stream);
return code;
}
} // cuda
} // cv
} // ppl
#include <ops/declarable/helpers/random.h>
//#include <NativeOps.h>
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <graph/Context.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/RandomLauncher.h>
#include <helpers/ShapeUtils.h>
#include <memory>
#include <vector>
namespace sd {
namespace ops {
namespace helpers {
/**
 * gammaLess - compute a gamma distributed value for shape parameters (alpha) in (0, 1)
 * @tparam T - any float type is acceptable
 * @param U - pre-generated uniform random values
 * @param index - starting position inside U for this draw
 * @param maxLength - length of U (reads wrap around when the end is reached)
 * @param alpha - shape of the distribution
 * @param beta - scale of the distributed values
 * @return gamma distributed value
 */
template <typename T>
T SD_DEVICE gammaLess(T const* U, sd::LongType index, sd::LongType maxLength, T const alpha, T const beta) {
auto d = T(1.0334f) - T(0.0766f) * math::p_exp(T(2.2942f) * alpha);
auto a = math::p_pow(T(2.f), alpha) * math::p_pow(T(1.f) - math::p_exp(-d * T(0.5f)), alpha);
auto b = alpha * math::p_pow(d, alpha - T(1.f)) * exp(-d);
auto c = a + b;
T rawX;
auto indexV = index;
auto underAlpha = T(1.f) / alpha;
auto powerAlpha = math::p_pow(T(2.f), alpha - T(1.f));
for (;;) {
auto u = (indexV < maxLength) ? U[indexV++] : U[0];
if (indexV >= maxLength) indexV = 0LL;
// math::atomics::sd_atomicAdd(index, 1LL);
if (u <= a / c)
rawX = -T(2.f) * math::p_log(T(1.f) - T(0.5f) * math::p_pow(c * u, underAlpha));
else
rawX = -math::p_log(c * (T(1.f) - u) / (alpha * math::p_pow(d, alpha - T(1.f))));
T v = indexV < maxLength ? U[indexV++] : U[0];
if (indexV >= maxLength) indexV = 0LL;
// math::atomics::sd_atomicAdd(index, 1LL);
if (rawX <= d) {
auto testVal = (math::p_pow(rawX, alpha - 1.f) * math::p_exp(-T(0.5f) * rawX)) /
(powerAlpha * math::p_pow(T(1.f) - math::p_exp(-T(0.5f) * rawX), alpha - T(1.f)));
if (testVal < v) continue;
break;
} else {
if (v <= math::p_pow(d / rawX, T(1.f) - alpha)) break;
continue;
}
}
return rawX / beta;
}
/**
 * gammaGreat - generate a gamma distributed value for shape parameters (alpha) greater than 1
 * @tparam T - given type (any float type is accepted)
 * @param U - pre-generated uniform random values
 * @param index - starting position inside U for this draw
 * @param maxLength - length of U (reads wrap around when the end is reached)
 * @param alpha - shape of the gamma distribution (alpha)
 * @param beta - scale of the gamma distribution (beta)
 * @return - gamma distributed value with the given params
 */
template <typename T>
T SD_DEVICE gammaGreat(T const* U, sd::LongType index, sd::LongType maxLength, T const alpha, T const beta) {
auto decreasedAlpha = alpha - T(1.f / 3.f);
auto c = T(1.) / math::p_sqrt(T(9.f) * decreasedAlpha);
// static auto index = 0LL;
auto indexV = index;
T x;
auto normalDistributed = [U, maxLength](sd::LongType& index) {
auto v1 = index < maxLength ? U[index++] : U[0];
if (index >= maxLength) index = 0LL;
// math::atomics::sd_atomicAdd(index, 1LL);
auto v2 = index < maxLength ? U[index++] : U[0];
if (index >= maxLength) index = 0LL;
// math::atomics::sd_atomicAdd(index, 1LL);
return math::p_cos(T(2.f * 3.141592f) * v2) * math::p_sqrt(T(-2.f) * math::p_log(v1));
};
float normalizedVar;
for (;;) {
do {
x = normalDistributed(indexV); // printf("X = %f\n", x);
normalizedVar = T(1.f) + c * x;
} while (normalizedVar < T(0.f));
normalizedVar = normalizedVar * normalizedVar * normalizedVar; // v * v * v;
auto u = U[indexV++];
if (indexV >= maxLength) indexV = 0LL;
// math::atomics::sd_atomicAdd(index, 1LL);
if (u < T(1.f) - T(.0331f) * (x * x) * (x * x)) break; // return (d * v / b);
if (log(u) < 0.5f * x * x + decreasedAlpha * (1. - normalizedVar + math::p_log(normalizedVar))) break;
}
return (decreasedAlpha * normalizedVar / beta);
}
/*
 * fillGammaKernel - fill up output with gamma distributed values
 *
 * uList - set of uniformly distributed values
 * uLength - length of uList
 * alpha - alpha (shape) params, addressed via alphaShape
 * beta - beta (scale) params, optional (nullptr means beta = 1)
 * output - gamma distributed output
 * */
template <typename T>
static SD_KERNEL void fillGammaKernel(T const* uList, sd::LongType uLength, T const* alpha,
const sd::LongType* alphaShape, T const* beta, const sd::LongType* betaShape,
T* output, const sd::LongType* outputShape) {
// fill up
__shared__ sd::LongType aLength;
__shared__ sd::LongType outLength;
if (threadIdx.x == 0) {
aLength = shape::length(alphaShape);
outLength = shape::length(outputShape) / aLength;
}
__syncthreads();
for (auto k = blockIdx.x; k < (int)outLength; k += gridDim.x) {
auto pos = k * aLength;
// auto u = uList[k]; // this is a vector
// sd::LongType index = k;
for (auto e = threadIdx.x; e < (int)aLength; e += blockDim.x) {
auto aIndex = shape::getIndexOffset(e, alphaShape);
auto bIndex = betaShape ? shape::getIndexOffset(e, betaShape) : -1LL;
auto betaV = T(beta != nullptr ? beta[bIndex] : T(1.f));
auto zIndex = shape::getIndexOffset(e + pos, outputShape);
output[zIndex] = alpha[aIndex] > T(1.f) ? gammaGreat(uList, pos, uLength, alpha[aIndex], betaV)
: gammaLess(uList, pos, uLength, alpha[aIndex], betaV);
}
}
}
template <typename T>
static void fillRandomGamma_(LaunchContext* context, graph::RandomGenerator& rng, NDArray* alpha, NDArray* beta,
NDArray* output) {
// To fill the output, alpha and beta are first broadcast to a common shape; the output is then filled in chunks of that broadcasted length.
const sd::LongType* broadcasted = nullptr;
if (beta != nullptr)
ShapeUtils::evalBroadcastShapeInfo(*alpha, *beta, true, broadcasted, context->getWorkspace());
else
broadcasted = alpha->shapeInfo();
auto step = shape::length(broadcasted);
auto shift = output->lengthOf() * 4LL;  // over-provision uniform values (4x the output length), since each gamma draw may consume several of them
auto copyAlpha = alpha;
auto copyBeta = beta;
if (beta != nullptr) {
NDArray alphaBroadcasted(broadcasted, alpha->dataType(), true, context);
NDArray betaBroadcasted(broadcasted, beta->dataType(), true, context);
copyAlpha = new NDArray(alphaBroadcasted.applyTrueBroadcast(BroadcastOpsTuple::Assign(), *alpha));
copyBeta = new NDArray(betaBroadcasted.applyTrueBroadcast(BroadcastOpsTuple::Assign(), *beta));
// if (!copyAlpha->isActualOnDevice()) copyAlpha->syncToDevice();
// if (!copyBeta->isActualOnDevice()) copyBeta->syncToDevice();
}
auto stream = context->getCudaStream();
NDArray uniform = NDArrayFactory::create<T>('c', {shift}, context);
uniform.syncToDevice();
// fill up uniform with given length
RandomLauncher::fillUniform(context, rng, &uniform, 0.0000000001, 0.9999999999);
uniform.syncToDevice();
fillGammaKernel<T><<<128, 128, 256, *stream>>>(
uniform.dataBuffer()->specialAsT<T>(), shift, copyAlpha->dataBuffer()->specialAsT<T>(),
copyAlpha->specialShapeInfo(), beta ? copyBeta->dataBuffer()->specialAsT<T>() : (T const*)nullptr,
beta ? copyBeta->specialShapeInfo() : (sd::LongType const*)nullptr, output->dataBuffer()->specialAsT<T>(),
output->specialShapeInfo());
if (beta != nullptr) {
delete copyAlpha;
delete copyBeta;
// delete broadcasted;
}
}
void fillRandomGamma(LaunchContext* context, graph::RandomGenerator& rng, NDArray* alpha, NDArray* beta,
NDArray* output) {
if (beta)
NDArray::prepareSpecialUse({output}, {alpha, beta});
else
NDArray::prepareSpecialUse({output}, {alpha});
BUILD_SINGLE_SELECTOR(output->dataType(), fillRandomGamma_, (context, rng, alpha, beta, output), SD_FLOAT_NATIVE);
if (beta)
NDArray::registerSpecialUse({output}, {alpha, beta});
else
    NDArray::registerSpecialUse({output}, {alpha});
}
BUILD_SINGLE_TEMPLATE(template void fillRandomGamma_,
(LaunchContext * context, graph::RandomGenerator& rng, NDArray* alpha, NDArray* beta,
NDArray* output),
SD_FLOAT_NATIVE);
/*
 * algorithm: Poisson generator based upon inversion by sequential search
 *
 *  init:
 *    x ← 0, p ← e^(-λ), s ← p
 *    draw u from a uniform random sequence U distributed on [0, 1]
 *  while u > s do:
 *    x ← x + 1
 *    p ← p * λ / x
 *    s ← s + p
 *  return x
 * */
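// A minimal per-element sketch of the loop described above, written in the same style as
// the other device helpers in this file. The helper name poissonInversionSketch is
// illustrative only and is not part of the original API; fillPoissonKernel below runs the
// same recurrence for every (u, lambda) pair.
template <typename T>
T SD_DEVICE poissonInversionSketch(T u, T lambda) {
  T p = math::sd_exp<T, T>(-lambda);  // P(X = 0)
  T s = p;                            // running CDF
  T x = T(0.f);
  while (u > s) {                     // walk the CDF until it passes u
    x += T(1.f);
    p *= lambda / x;                  // P(X = x) = P(X = x - 1) * lambda / x
    s += p;
  }
  return x;                           // Poisson(lambda) distributed value
}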
template <typename T>
static SD_KERNEL void fillPoissonKernel(T* uList, sd::LongType uLength, T* lambda, const sd::LongType* lambdaShape,
T* output, const sd::LongType* outputShape) {
__shared__ sd::LongType step;
if (threadIdx.x == 0) {
step = shape::length(lambdaShape);
}
__syncthreads();
for (auto k = blockIdx.x; k < (int)uLength; k += gridDim.x) {
auto pos = k * step;
auto u = uList[k];
for (auto e = threadIdx.x; e < step; e += blockDim.x) {
auto p = math::sd_exp<T, T>(-lambda[e]);
auto s = p;
auto x = T(0.f);
auto lIndex = shape::getIndexOffset(e, lambdaShape);
auto zIndex = shape::getIndexOffset(e + pos, outputShape);
while (u > s) {
x += T(1.);
p *= lambda[lIndex] / x;
s += p;
}
output[zIndex] = x;
}
}
}
template <typename T>
static void fillRandomPoisson_(LaunchContext* context, graph::RandomGenerator& rng, NDArray* lambda, NDArray* output) {
auto shift = output->lengthOf() / lambda->lengthOf();
NDArray uniform('c', {shift}, output->dataType());
auto stream = context->getCudaStream();
// fill up uniform with given length
RandomLauncher::fillUniform(context, rng, &uniform, 0., 1.);
fillPoissonKernel<T><<<128, 256, 128, *stream>>>(uniform.dataBuffer()->specialAsT<T>(), uniform.lengthOf(),
lambda->dataBuffer()->specialAsT<T>(), lambda->specialShapeInfo(),
output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo());
}
void fillRandomPoisson(LaunchContext* context, graph::RandomGenerator& rng, NDArray* lambda, NDArray* output) {
NDArray::prepareSpecialUse({output}, {lambda});
BUILD_SINGLE_SELECTOR(output->dataType(), fillRandomPoisson_, (context, rng, lambda, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {lambda});
}
BUILD_SINGLE_TEMPLATE(template void fillRandomPoisson_,
(LaunchContext * context, graph::RandomGenerator& rng, NDArray* lambda, NDArray* output),
SD_FLOAT_NATIVE);
template <typename T>
static SD_KERNEL void fillUniformKernel(graph::RandomGenerator* devRng, T from, T to, T* output,
const sd::LongType* outputShape) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
__shared__ sd::LongType outputLen;
if (0 == threadIdx.x) {
outputLen = shape::length(outputShape);
}
__syncthreads();
for (auto i = start; i < outputLen; i += step) {
auto zIndex = shape::getIndexOffset(i, outputShape);
output[zIndex] = devRng->relativeT<T>(i, from, to);
}
}
template <typename T>
static void fillRandomUniform_(LaunchContext* context, graph::RandomGenerator& rng, NDArray* min, NDArray* max,
NDArray* output) {
T minVal = T(0);
T maxVal = DataTypeUtils::infOrMax<T>();
if (min) minVal = min->t<T>(0);
if (max) maxVal = max->t<T>(0);
if (output->isR())
RandomLauncher::fillUniform(context, rng, output, minVal, maxVal);
else {
auto stream = context->getCudaStream();
graph::RandomGenerator* devRng;
auto err = cudaMalloc(&devRng, sizeof(graph::RandomGenerator));
if (err != 0) {
cuda_exception::build("fillRandomUniform_: Cannot allocate device memory for random generator due error", err);
}
err = cudaMemcpy(devRng, &rng, sizeof(graph::RandomGenerator), cudaMemcpyHostToDevice);
if (err != 0) {
cuda_exception::build("fillRandomUniform_: Cannot copy random generator to device", err);
}
auto outputBuf = output->dataBuffer()->specialAsT<T>();
auto outputShape = output->specialShapeInfo();
fillUniformKernel<T><<<128, 128, 128, *stream>>>(devRng, minVal, maxVal, outputBuf, outputShape);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
cuda_exception::build("fillRandomUniform_: Cannot successfully finish kernel call", err);
}
err = cudaFree(devRng);
if (err != 0) {
cuda_exception::build("fillRandomUniform_: Cannot deallocate device memory for random generator", err);
}
}
}
void fillRandomUniform(LaunchContext* context, graph::RandomGenerator& rng, NDArray* min, NDArray* max,
NDArray* output) {
BUILD_SINGLE_SELECTOR(output->dataType(), fillRandomUniform_, (context, rng, min, max, output), SD_NUMERIC_TYPES);
}
///////////////////////////////////////////////////////////////////
// used https://en.wikipedia.org/wiki/Categorical_distribution
// methods: gumbel trick + softmax + argmax
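// A minimal sketch of the Gumbel-max trick that fillMultiNomialCuda_ below applies per
// (batch, sample) pair: adding independent Gumbel(0, 1) noise -log(-log(u_k)) to the
// unnormalized log-probabilities and taking the argmax draws class k with probability
// softmax(logits)[k]. The helper name gumbelArgmaxSketch is illustrative only and not part
// of the original API; the uniforms are assumed to lie strictly inside (0, 1).
template <typename X>
sd::LongType SD_DEVICE gumbelArgmaxSketch(const X* logits, const X* uniforms, sd::LongType numOfClass) {
  sd::LongType arg = 0;
  X best = logits[0] - sd::math::sd_log<X, X>(-sd::math::sd_log<X, X>(uniforms[0]));
  for (sd::LongType k = 1; k < numOfClass; k++) {
    X candidate = logits[k] - sd::math::sd_log<X, X>(-sd::math::sd_log<X, X>(uniforms[k]));
    if (candidate > best) {  // keep the class with the largest perturbed log-probability
      best = candidate;
      arg = k;
    }
  }
  return arg;
}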
template <typename X, typename Z>
SD_KERNEL static void fillMultiNomialCuda_(graph::RandomGenerator* devRng, const void* vx,
const sd::LongType* xShapeInfo, void* vz, const sd::LongType* zShapeInfo,
const sd::LongType batchValue, const sd::LongType numOfSamples,
const sd::LongType numOfClassX, const sd::LongType dimA, const X minVal,
const X maxVal) {
const X* x = reinterpret_cast<const X*>(vx);
Z* z = reinterpret_cast<Z*>(vz);
__shared__ sd::LongType xDimAstride, zDimAstride, xDimCstride, zDimCstride, dimC;
if (0 == threadIdx.x) {
dimC = (0 == dimA) ? 1 : 0;
zDimAstride = shape::stride(zShapeInfo)[dimA];
xDimAstride = shape::stride(xShapeInfo)[dimA];
zDimCstride = shape::stride(zShapeInfo)[dimC];
xDimCstride = shape::stride(xShapeInfo)[dimC];
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType index = tid; index < batchValue * numOfSamples; index += gridDim.x * blockDim.x) {
sd::LongType nBatchIndex = index / numOfSamples;
sd::LongType nSampleIndexInBatch = index - (nBatchIndex * numOfSamples);
const X* xTad = x + (nBatchIndex * xDimCstride);
Z* zTad = z + (nBatchIndex * zDimCstride);
Z& arg = zTad[nSampleIndexInBatch * zDimAstride];
X Max = -minVal;
sd::LongType nSamplesPerBatch = nBatchIndex * numOfClassX * numOfSamples;
sd::LongType nClassPerSamples = nSampleIndexInBatch * numOfClassX;
for (sd::LongType nClass = 0; nClass < numOfClassX; nClass++) {
sd::LongType nIndex = nSamplesPerBatch + nClassPerSamples + nClass;
X tValue = (xTad[nClass * xDimAstride] -
sd::math::sd_log<X, X>(-sd::math::sd_log<X, X>(devRng->relativeT<X>(nIndex, minVal, maxVal))));
if (tValue > Max) {
Max = tValue;
arg = nClass;
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
SD_HOST static void fillMultiNomialCudaLauncher(const int blocksPerGrid, const int threadsPerBlock,
const cudaStream_t* stream, graph::RandomGenerator* devRng,
const void* vx, const sd::LongType* xShapeInfo, void* vz,
const sd::LongType* zShapeInfo, const sd::LongType batchValue,
const sd::LongType numOfSamples, const sd::LongType numOfClassX,
const sd::LongType dimA) {
const X minVal = DataTypeUtils::min<X>();
const X maxVal = 1.0;
fillMultiNomialCuda_<X, Z><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(
devRng, vx, xShapeInfo, vz, zShapeInfo, batchValue, numOfSamples, numOfClassX, dimA, minVal, maxVal);
}
///////////////////////////////////////////////////////////////////
void fillRandomMultiNomial(LaunchContext* context, graph::RandomGenerator& rng, NDArray& input, NDArray& output,
const sd::LongType numOfSamples, const int dimC) {
sd::LongType dimA = (0 == dimC) ? 1 : 0;
const sd::LongType batchValue = output.sizeAt(dimC);
const sd::LongType numOfClassX = input.sizeAt(dimA);
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (batchValue * numOfSamples + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "fillMultinomial");
graph::RandomGenerator* devRng;
auto err = cudaMalloc(&devRng, sizeof(graph::RandomGenerator));
if (err != 0) {
cuda_exception::build("fillRandomMultiNomial: Cannot allocate device memory for random generator due error", err);
}
err = cudaStreamSynchronize(*context->getCudaStream());
if (err != 0) {
cuda_exception::build("fillRandomMultiNomial: Cannot synchronize stream for random generator due error", err);
}
err =
cudaMemcpyAsync(devRng, &rng, sizeof(graph::RandomGenerator), cudaMemcpyHostToDevice, *context->getCudaStream());
if (err != 0) {
cuda_exception::build("fillRandomMultiNomial: Cannot copy random generator to device", err);
}
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_DOUBLE_SELECTOR(input.dataType(), output.dataType(), fillMultiNomialCudaLauncher,
(blocksPerGrid, threadsPerBlock, context->getCudaStream(), devRng, input.specialBuffer(),
input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), batchValue,
numOfSamples, numOfClassX, dimA),
SD_FLOAT_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
err = cudaFree(devRng);
if (err != 0) {
cuda_exception::build("fillRandomMultiNomial: Cannot deallocate device memory for random generator", err);
}
rng.rewindH(output.lengthOf() * numOfClassX);
}
} // namespace helpers
} // namespace ops
} // namespace sd
//#include <cutil_inline.h>
#include <cutil_inline.h>
#include <cutil_math.h>
#include "CudaMath.h"
#include "dds.h"
#include "permutations.h"
// Definitions
#define INPUT_IMAGE "lena_std.ppm"
#define REFERENCE_IMAGE "lena_ref.dds"
#if 1
#define ERROR_THRESHOLD 0.1f
#else
#define ERROR_THRESHOLD 0.02f
#endif
#define NUM_THREADS 64 // Number of threads per block.
#if 1
#define __debugsync() __syncthreads()
#else
#define __debugsync()
#endif
template <class T>
__device__ inline void swap(T & a, T & b)
{
T tmp = a;
a = b;
b = tmp;
}
//__constant__ float3 kColorMetric = { 0.2126f, 0.7152f, 0.0722f };
__constant__ float3 kColorMetric = { 1.0f, 1.0f, 1.0f };
////////////////////////////////////////////////////////////////////////////////
// Sort colors
////////////////////////////////////////////////////////////////////////////////
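// ranks[tid] counts how many of the 16 values are strictly smaller than values[tid]; the
// second pass bumps later duplicates so ties keep their original relative order, which
// yields a valid permutation of 0..15.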
__device__ void sortColors(const float * values, int * ranks)
{
#if 1
if (threadIdx.x == 0)
{
for (int tid = 0; tid < 16; tid++)
{
int rank = 0;
for (int i = 0; i < 16; i++)
{
rank += (values[i] < values[tid]);
}
ranks[tid] = rank;
}
// Resolve elements with the same index.
for (int i = 0; i < 15; i++)
{
for (int tid = 0; tid < 16; tid++)
{
if (tid > i && ranks[tid] == ranks[i]) ++ranks[tid];
}
}
}
#else
const int tid = threadIdx.x;
int rank = 0;
#pragma unroll
for (int i = 0; i < 16; i++)
{
rank += (values[i] < values[tid]);
}
ranks[tid] = rank;
// Resolve elements with the same index.
#pragma unroll
for (int i = 0; i < 15; i++)
{
if (tid > i && ranks[tid] == ranks[i]) ++ranks[tid];
}
#endif
}
////////////////////////////////////////////////////////////////////////////////
// Load color block to shared mem
////////////////////////////////////////////////////////////////////////////////
__device__ void loadColorBlock(const uint * image, float3 colors[16], float3 sums[16], int xrefs[16])
{
const int bid = blockIdx.x;
const int idx = threadIdx.x;
__shared__ float dps[16];
float3 tmp;
if (idx < 16)
{
// Read color and copy to shared mem.
uint c = image[(bid) * 16 + idx];
colors[idx].x = ((c >> 0) & 0xFF) * (1.0f / 255.0f);
colors[idx].y = ((c >> 8) & 0xFF) * (1.0f / 255.0f);
colors[idx].z = ((c >> 16) & 0xFF) * (1.0f / 255.0f);
// No need to synchronize, 16 < warp size.
#if 1
} __debugsync();
#endif
// Sort colors along the best fit line.
colorSums(colors, sums);
float3 axis = bestFitLine(colors, sums[0]);
if (idx < 16) {
dps[idx] = dot(colors[idx], axis);
#if 1
} __debugsync(); if (idx < 16) {
#endif
sortColors(dps, xrefs);
tmp = colors[idx];
#if 1
} __debugsync(); if (idx < 16) {
#endif
colors[xrefs[idx]] = tmp;
}
}
////////////////////////////////////////////////////////////////////////////////
// Round color to RGB565 and expand
////////////////////////////////////////////////////////////////////////////////
inline __device__ float3 roundAndExpand(float3 v, ushort * w)
{
v.x = rintf(__saturatef(v.x) * 31.0f);
v.y = rintf(__saturatef(v.y) * 63.0f);
v.z = rintf(__saturatef(v.z) * 31.0f);
*w = ((ushort)v.x << 11) | ((ushort)v.y << 5) | (ushort)v.z;
v.x *= 0.03227752766457f; // approximate integer bit expansion.
v.y *= 0.01583151765563f;
v.z *= 0.03227752766457f;
return v;
}
__constant__ float alphaTable4[4] = { 9.0f, 0.0f, 6.0f, 3.0f };
__constant__ float alphaTable3[4] = { 4.0f, 0.0f, 2.0f, 2.0f };
__constant__ const int prods4[4] = { 0x090000,0x000900,0x040102,0x010402 };
__constant__ const int prods3[4] = { 0x040000,0x000400,0x040101,0x010401 };
#define USE_TABLES 1
////////////////////////////////////////////////////////////////////////////////
// Evaluate permutations
////////////////////////////////////////////////////////////////////////////////
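// For a fixed permutation, texel i is approximated as alpha_i * a + beta_i * b (with
// alpha_i + beta_i = 1), and the endpoints (a, b) minimizing
//   sum_i || alpha_i * a + beta_i * b - colors[i] ||^2
// satisfy the 2x2 normal equations
//   [ alpha2_sum      alphabeta_sum ] [a]   [ alphax_sum ]
//   [ alphabeta_sum   beta2_sum     ] [b] = [ betax_sum  ]
// whose closed-form solution is exactly what evalPermutation4/evalPermutation3 compute
// below, with factor = 1 / (alpha2_sum * beta2_sum - alphabeta_sum^2).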
static __device__ float evalPermutation4(const float3 * colors, uint permutation, ushort * start, ushort * end, float3 color_sum)
{
// Compute endpoints using least squares.
#if USE_TABLES
float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
int akku = 0;
// Compute alpha & beta for this permutation.
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
alphax_sum += alphaTable4[bits & 3] * colors[i];
akku += prods4[bits & 3];
}
float alpha2_sum = float(akku >> 16);
float beta2_sum = float((akku >> 8) & 0xff);
float alphabeta_sum = float((akku >> 0) & 0xff);
float3 betax_sum = 9*color_sum - alphax_sum;
#else
float alpha2_sum = 0.0f;
float beta2_sum = 0.0f;
float alphabeta_sum = 0.0f;
float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
// Compute alpha & beta for this permutation.
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
float beta = (bits & 1);
if (bits & 2) beta = (1 + beta) * (1.0f / 3.0f);
float alpha = 1.0f - beta;
alpha2_sum += alpha * alpha;
beta2_sum += beta * beta;
alphabeta_sum += alpha * beta;
alphax_sum += alpha * colors[i];
}
float3 betax_sum = color_sum - alphax_sum;
#endif
// alpha2, beta2, alphabeta and factor could be precomputed for each permutation, but it's faster to recompute them.
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
// Round a, b to the closest 5-6-5 color and expand...
a = roundAndExpand(a, start);
b = roundAndExpand(b, end);
// compute the error
float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
return (1.0f / 9.0f) * dot(e, kColorMetric);
}
static __device__ float evalPermutation3(const float3 * colors, uint permutation, ushort * start, ushort * end, float3 color_sum)
{
// Compute endpoints using least squares.
#if USE_TABLES
float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
int akku = 0;
// Compute alpha & beta for this permutation.
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
alphax_sum += alphaTable3[bits & 3] * colors[i];
akku += prods3[bits & 3];
}
float alpha2_sum = float(akku >> 16);
float beta2_sum = float((akku >> 8) & 0xff);
float alphabeta_sum = float((akku >> 0) & 0xff);
float3 betax_sum = 4*color_sum - alphax_sum;
#else
float alpha2_sum = 0.0f;
float beta2_sum = 0.0f;
float alphabeta_sum = 0.0f;
float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
// Compute alpha & beta for this permutation.
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
float beta = (bits & 1);
if (bits & 2) beta = 0.5f;
float alpha = 1.0f - beta;
alpha2_sum += alpha * alpha;
beta2_sum += beta * beta;
alphabeta_sum += alpha * beta;
alphax_sum += alpha * colors[i];
}
float3 betax_sum = color_sum - alphax_sum;
#endif
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
// Round a, b to the closest 5-6-5 color and expand...
a = roundAndExpand(a, start);
b = roundAndExpand(b, end);
// compute the error
float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
return (1.0f / 4.0f) * dot(e, kColorMetric);
}
__device__ void evalAllPermutations(const float3 * colors, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors, float3 color_sum)
{
const int idx = threadIdx.x;
float bestError = FLT_MAX;
__shared__ uint s_permutations[160];
for(int i = 0; i < 16; i++)
{
int pidx = idx + NUM_THREADS * i;
if (pidx >= 992) break;
ushort start, end;
uint permutation = permutations[pidx];
if (pidx < 160) s_permutations[pidx] = permutation;
float error = evalPermutation4(colors, permutation, &start, &end, color_sum);
if (error < bestError)
{
bestError = error;
bestPermutation = permutation;
bestStart = start;
bestEnd = end;
}
}
if (bestStart < bestEnd)
{
swap(bestEnd, bestStart);
bestPermutation ^= 0x55555555; // Flip indices.
}
for(int i = 0; i < 3; i++)
{
int pidx = idx + NUM_THREADS * i;
if (pidx >= 160) break;
ushort start, end;
uint permutation = s_permutations[pidx];
float error = evalPermutation3(colors, permutation, &start, &end, color_sum);
if (error < bestError)
{
bestError = error;
bestPermutation = permutation;
bestStart = start;
bestEnd = end;
if (bestStart > bestEnd)
{
swap(bestEnd, bestStart);
bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices.
}
}
}
errors[idx] = bestError;
}
////////////////////////////////////////////////////////////////////////////////
// Find index with minimum error
////////////////////////////////////////////////////////////////////////////////
__device__ int findMinError(float * errors)
{
const int idx = threadIdx.x;
__shared__ int indices[NUM_THREADS];
indices[idx] = idx;
#if 1
for(int d = NUM_THREADS/2; d > 0; d >>= 1)
{
__syncthreads();
if (idx < d)
{
float err0 = errors[idx];
float err1 = errors[idx + d];
if (err1 < err0) {
errors[idx] = err1;
indices[idx] = indices[idx + d];
}
}
}
#else
for(int d = NUM_THREADS/2; d > 32; d >>= 1)
{
__syncthreads();
if (idx < d)
{
float err0 = errors[idx];
float err1 = errors[idx + d];
if (err1 < err0) {
errors[idx] = err1;
indices[idx] = indices[idx + d];
}
}
}
__syncthreads();
// unroll last 6 iterations
if (idx < 32)
{
if (errors[idx + 32] < errors[idx]) {
errors[idx] = errors[idx + 32];
indices[idx] = indices[idx + 32];
}
if (errors[idx + 16] < errors[idx]) {
errors[idx] = errors[idx + 16];
indices[idx] = indices[idx + 16];
}
if (errors[idx + 8] < errors[idx]) {
errors[idx] = errors[idx + 8];
indices[idx] = indices[idx + 8];
}
if (errors[idx + 4] < errors[idx]) {
errors[idx] = errors[idx + 4];
indices[idx] = indices[idx + 4];
}
if (errors[idx + 2] < errors[idx]) {
errors[idx] = errors[idx + 2];
indices[idx] = indices[idx + 2];
}
if (errors[idx + 1] < errors[idx]) {
errors[idx] = errors[idx + 1];
indices[idx] = indices[idx + 1];
}
}
#endif
__syncthreads();
return indices[0];
}
////////////////////////////////////////////////////////////////////////////////
// Save DXT block
////////////////////////////////////////////////////////////////////////////////
__device__ void saveBlockDXT1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 * result)
{
const int bid = blockIdx.x;
if (start == end)
{
permutation = 0;
}
// Reorder permutation.
uint indices = 0;
for(int i = 0; i < 16; i++)
{
int ref = xrefs[i];
indices |= ((permutation >> (2 * ref)) & 3) << (2 * i);
}
// Write endpoints.
result[bid].x = (end << 16) | start;
// Write palette indices.
result[bid].y = indices;
}
////////////////////////////////////////////////////////////////////////////////
// Compress color block
////////////////////////////////////////////////////////////////////////////////
__global__ void compress(const uint * permutations, const uint * image, uint2 * result)
{
const int idx = threadIdx.x;
__shared__ float3 colors[16];
__shared__ float3 sums[16];
__shared__ int xrefs[16];
loadColorBlock(image, colors, sums, xrefs);
__syncthreads();
ushort bestStart, bestEnd;
uint bestPermutation;
__shared__ float errors[NUM_THREADS];
evalAllPermutations(colors, permutations, bestStart, bestEnd, bestPermutation, errors, sums[0]);
// Use a parallel reduction to find minimum error.
const int minIdx = findMinError(errors);
__syncthreads();
// Only write the result of the winner thread.
if (idx == minIdx)
{
saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result);
}
}
// Helper structs and functions to validate the output of the compressor.
// We cannot simply do a bitwise compare, because different compilers produce different
// results for different targets due to floating point arithmetic.
union Color32 {
struct {
unsigned char b, g, r, a;
};
unsigned int u;
};
union Color16 {
struct {
unsigned short b : 5;
unsigned short g : 6;
unsigned short r : 5;
};
unsigned short u;
};
struct BlockDXT1
{
Color16 col0;
Color16 col1;
union {
unsigned char row[4];
unsigned int indices;
};
void decompress(Color32 colors[16]) const;
};
void BlockDXT1::decompress(Color32 * colors) const
{
Color32 palette[4];
// Does bit expansion before interpolation.
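    // e.g. a 5-bit component of 16 expands to (16 << 3) | (16 >> 2) = 132, close to the
    // exact rescaling 16 * 255 / 31, about 131.6.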
palette[0].b = (col0.b << 3) | (col0.b >> 2);
palette[0].g = (col0.g << 2) | (col0.g >> 4);
palette[0].r = (col0.r << 3) | (col0.r >> 2);
palette[0].a = 0xFF;
palette[1].r = (col1.r << 3) | (col1.r >> 2);
palette[1].g = (col1.g << 2) | (col1.g >> 4);
palette[1].b = (col1.b << 3) | (col1.b >> 2);
palette[1].a = 0xFF;
if( col0.u > col1.u ) {
// Four-color block: derive the other two colors.
palette[2].r = (2 * palette[0].r + palette[1].r) / 3;
palette[2].g = (2 * palette[0].g + palette[1].g) / 3;
palette[2].b = (2 * palette[0].b + palette[1].b) / 3;
palette[2].a = 0xFF;
palette[3].r = (2 * palette[1].r + palette[0].r) / 3;
palette[3].g = (2 * palette[1].g + palette[0].g) / 3;
palette[3].b = (2 * palette[1].b + palette[0].b) / 3;
palette[3].a = 0xFF;
}
else {
// Three-color block: derive the other color.
palette[2].r = (palette[0].r + palette[1].r) / 2;
palette[2].g = (palette[0].g + palette[1].g) / 2;
palette[2].b = (palette[0].b + palette[1].b) / 2;
palette[2].a = 0xFF;
palette[3].r = 0x00;
palette[3].g = 0x00;
palette[3].b = 0x00;
palette[3].a = 0x00;
}
for (int i = 0; i < 16; i++)
{
colors[i] = palette[(indices >> (2*i)) & 0x3];
}
}
static int compareColors(const Color32 * b0, const Color32 * b1)
{
int sum = 0;
for (int i = 0; i < 16; i++)
{
int r = (b0[i].r - b1[i].r);
int g = (b0[i].g - b1[i].g);
int b = (b0[i].b - b1[i].b);
sum += r*r + g*g + b*b;
}
return sum;
}
static int compareBlock(const BlockDXT1 * b0, const BlockDXT1 * b1)
{
Color32 colors0[16];
Color32 colors1[16];
if (memcmp(b0, b1, sizeof(BlockDXT1)) == 0)
{
return 0;
}
else
{
b0->decompress(colors0);
b1->decompress(colors1);
return compareColors(colors0, colors1);
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
// Load input image.
unsigned char * data = NULL;
uint W, H;
char* image_path = cutFindFilePath(INPUT_IMAGE, argv[0]);
if (image_path == 0) {
printf("Error, unable to find source image\n");
cudaThreadExit();
exit(EXIT_FAILURE);
}
if (!cutLoadPPM4ub( image_path, &data, &W, &H)) {
printf("Error, unable to open source image\n");
cudaThreadExit();
exit(EXIT_FAILURE);
}
uint w, h;
#if 1
// Reduce the image size so that it doesn't take so long on emulation.
w = W >> 4;
h = H >> 4;
#else
w = W >> 4;
h = H >> 4;
#endif
// Allocate input image.
const uint memSize = w * h * 4;
cutilCondition( 0 != memSize );
uint * image = (uint *) malloc(memSize);
// Convert linear image to block linear.
for(uint by = 0; by < h/4; by++) {
for(uint bx = 0; bx < w/4; bx++) {
for (int i = 0; i < 16; i++) {
const int x = i & 3;
const int y = i / 4;
image[(by * w/4 + bx) * 16 + i] =
((uint *)data)[(by * 4 + y) * 4 * (W/4) + bx * 4 + x];
}
}
}
// copy into global mem
uint * d_data = NULL;
cutilSafeCall( cudaMalloc((void**) &d_data, memSize) );
// Result
uint * d_result = NULL;
const uint compressedSize = (w / 4) * (h / 4) * 8;
cutilSafeCall( cudaMalloc((void**) &d_result, compressedSize) );
uint * result = (uint *)malloc(compressedSize);
// Compute permutations.
uint permutations[1024];
computePermutations(permutations);
// Upload permutations.
uint * d_permutations = NULL;
cutilSafeCall( cudaMalloc((void**) &d_permutations, 1024 * sizeof(uint)) );
cutilSafeCall( cudaMemcpy(d_permutations, permutations, 1024 * sizeof(uint),
cudaMemcpyHostToDevice) );
uint timer;
cutilCheckError(cutCreateTimer(&timer));
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStartTimer(timer));
// Upload image.
cutilSafeCall( cudaMemcpy(d_data, image, memSize, cudaMemcpyHostToDevice) );
uint blocks = ((w + 3) / 4) * ((h + 3) / 4);
compress<<<blocks, NUM_THREADS>>>(d_permutations, d_data, (uint2 *)d_result);
cutilCheckMsg("compress");
cutilSafeCall(cudaMemcpy(result, d_result, compressedSize, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaThreadSynchronize());
cutilCheckError(cutStopTimer(timer));
float time = cutGetTimerValue(timer);
printf("Time %f msec\n", time);
char output_filename[1024];
strcpy(output_filename, image_path);
strcpy(output_filename + strlen(image_path) - 3, "dds");
// Write DDS file.
FILE * fp = fopen(output_filename, "wb");
if (fp == 0) {
printf("Error, unable to open output image\n");
cudaThreadExit();
exit(EXIT_FAILURE);
}
DDSHeader header;
header.fourcc = FOURCC_DDS;
header.size = 124;
header.flags = (DDSD_WIDTH|DDSD_HEIGHT|DDSD_CAPS|DDSD_PIXELFORMAT|DDSD_LINEARSIZE);
header.height = h;
header.width = w;
header.pitch = compressedSize;
header.depth = 0;
header.mipmapcount = 0;
memset(header.reserved, 0, sizeof(header.reserved));
header.pf.size = 32;
header.pf.flags = DDPF_FOURCC;
header.pf.fourcc = FOURCC_DXT1;
header.pf.bitcount = 0;
header.pf.rmask = 0;
header.pf.gmask = 0;
header.pf.bmask = 0;
header.pf.amask = 0;
header.caps.caps1 = DDSCAPS_TEXTURE;
header.caps.caps2 = 0;
header.caps.caps3 = 0;
header.caps.caps4 = 0;
header.notused = 0;
fwrite(&header, sizeof(DDSHeader), 1, fp);
fwrite(result, compressedSize, 1, fp);
fclose(fp);
// Make sure the generated image is correct.
const char* reference_image_path = cutFindFilePath(REFERENCE_IMAGE, argv[0]);
if (reference_image_path == 0) {
printf("Error, unable to find reference image\n");
cudaThreadExit();
exit(EXIT_FAILURE);
}
fp = fopen(reference_image_path, "rb");
if (fp == 0) {
printf("Error, unable to open reference image\n");
cudaThreadExit();
exit(EXIT_FAILURE);
}
fseek(fp, sizeof(DDSHeader), SEEK_SET);
uint referenceSize = (W / 4) * (H / 4) * 8;
uint * reference = (uint *)malloc(referenceSize);
fread(reference, referenceSize, 1, fp);
fclose(fp);
float rms = 0;
for (uint y = 0; y < h; y += 4)
{
for (uint x = 0; x < w; x += 4)
{
uint referenceBlockIdx = ((y/4) * (W/4) + (x/4));
uint resultBlockIdx = ((y/4) * (w/4) + (x/4));
int cmp = compareBlock(((BlockDXT1 *)result) + resultBlockIdx, ((BlockDXT1 *)reference) + referenceBlockIdx);
if (cmp != 0) {
printf("Error at (%d, %d):\t%f rms\n", x/4, y/4, float(cmp)/16/3);
}
rms += cmp;
}
}
rms /= w * h * 3;
printf("RMS(reference, result) = %f\n", rms);
if (rms <= ERROR_THRESHOLD)
{
printf("Test PASSED\n");
}
else
{
printf("Test FAILED\n");
}
// Free allocated memory.
cutFree(image_path);
cutFree(data);
free(image);
cutilSafeCall(cudaFree(d_permutations));
cutilSafeCall(cudaFree(d_data));
cutilSafeCall(cudaFree(d_result));
free(result);
cutilCheckError(cutDeleteTimer(timer));
cudaThreadExit();
cutilExit(argc, argv);
}
// For compatibility with PyTorch 1.1
#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif
// #define thc_cos THCNumerics<scalar_t>::cos
// #define thc_sin THCNumerics<scalar_t>::sin
#define thc_cos std::cos
#define thc_sin std::sin
#define FULL_MASK 0xffffffff
static constexpr int MAX_BLOCK_SIZE = 1024;
static constexpr int WORK_PER_THREAD = 16;
static constexpr int ELEMENTARY_SIZE = MAX_BLOCK_SIZE / 2;
static constexpr int MAX_N_FACTORS = 10;
template <typename T, size_t N>
using CudaAcsr32 = at::PackedTensorAccessor32<T, N, at::RestrictPtrTraits>;
__host__ __device__ static inline int64_t div_up(int64_t a, int64_t b) {
return (a + b - 1) / b;
}
__host__ __device__ static inline int div_up(int a, int b) {
return (a + b - 1) / b;
}
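// e.g. div_up(10, 4) == 3; used below to size launch grids and block-level reductions.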
template <typename scalar_t>
static __device__ __forceinline__ void atomicAdd(thrust::complex<scalar_t> *address, thrust::complex<scalar_t> val) {
atomicAdd((scalar_t *)address, val.real());
atomicAdd((scalar_t *)address + 1, val.imag());
}
template <typename scalar_t>
static __device__ __forceinline__ thrust::complex<scalar_t> __shfl_down_sync(unsigned int mask, thrust::complex<scalar_t> value, unsigned int delta, int width = warpSize) {
return thrust::complex<scalar_t>(__shfl_down_sync(mask, value.real(), delta, width),
__shfl_down_sync(mask, value.imag(), delta, width));
}
// 2x2 matrix [a, b; c, d] multiplied by a vector [x, y]
template <typename scalar_t>
static __device__ __forceinline__ thrust::pair<scalar_t, scalar_t> mult2x2(scalar_t a, scalar_t b,
scalar_t c, scalar_t d,
scalar_t x, scalar_t y) {
return thrust::make_pair(a * x + b * y, c * x + d * y);
}
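// Example: for a butterfly factor [[a, b], [c, d]] applied to the pair (x, y),
// mult2x2(a, b, c, d, x, y) returns (a * x + b * y, c * x + d * y), the same 2x2 product
// the kernels below compute inline for each index.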
template <typename scalar_t>
__global__ void butterfly_factor_multiply_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 3> input_a,
at::PackedTensorAccessor64<scalar_t, 3> output_a) {
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2);
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
const scalar_t twiddle_val[2][2] = {{twiddle_a[0][0][i], twiddle_a[0][1][i]},
{twiddle_a[1][0][i], twiddle_a[1][1][i]}};
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
const scalar_t input_val[2] = {input_a[b][0][i], input_a[b][1][i]};
#pragma unroll
for (int j = 0; j <= 1; ++j) {
output_a[b][j][i] = twiddle_val[j][0] * input_val[0] + twiddle_val[j][1] * input_val[1];
}
}
}
}
template <typename scalar_t>
__global__ void butterfly_factor_multiply_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 4> input_a,
at::PackedTensorAccessor64<scalar_t, 4> output_a) {
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2);
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
const scalar_t twiddle_val[2][2][2] = {{{twiddle_a[0][0][i][0], twiddle_a[0][0][i][1]},
{twiddle_a[0][1][i][0], twiddle_a[0][1][i][1]}},
{{twiddle_a[1][0][i][0], twiddle_a[1][0][i][1]},
{twiddle_a[1][1][i][0], twiddle_a[1][1][i][1]}}};
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
const scalar_t input_val[2][2] = {{input_a[b][0][i][0], input_a[b][0][i][1]},
{input_a[b][1][i][0], input_a[b][1][i][1]}};
#pragma unroll
for (int j = 0; j <= 1; ++j) {
output_a[b][j][i][0] = twiddle_val[j][0][0] * input_val[0][0] - twiddle_val[j][0][1] * input_val[0][1]
+ twiddle_val[j][1][0] * input_val[1][0] - twiddle_val[j][1][1] * input_val[1][1];
output_a[b][j][i][1] = twiddle_val[j][0][0] * input_val[0][1] + twiddle_val[j][0][1] * input_val[0][0]
+ twiddle_val[j][1][0] * input_val[1][1] + twiddle_val[j][1][1] * input_val[1][0];
}
}
}
}
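// Expected layouts, as read by the accessors above: in the real case twiddle is (2, 2, n)
// and input/output are (batch_size, 2, n); in the complex case each tensor carries an
// extra trailing dimension of size 2 holding the (real, imaginary) parts.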
void butterfly_factor_multiply_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output) {
const auto batch_size = input.size(0);
const auto n = input.size(2);
dim3 block;
block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n);
block.y = div_up(MAX_BLOCK_SIZE, block.x);
dim3 grid(div_up(n, block.x), div_up(batch_size, block.y * WORK_PER_THREAD));
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_factor_multiply_cuda", [&] {
switch (input.dim()) {
case 3: // real
{
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 3>();
const auto input_a = input.packed_accessor64<scalar_t, 3>();
auto output_a = output.packed_accessor64<scalar_t, 3>();
butterfly_factor_multiply_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a, output_a);
break;
}
case 4: // complex
{
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>();
const auto input_a = input.packed_accessor64<scalar_t, 4>();
auto output_a = output.packed_accessor64<scalar_t, 4>();
butterfly_factor_multiply_complex_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a, output_a);
break;
}
default:
AT_ERROR("butterfly_factor_multiply requires input dimension 3 or 4");
}
});
  // Capture the error once: cudaGetLastError() resets the error state, so calling it a
  // second time in the message would always report cudaSuccess.
  auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
              "butterfly_factor_multiply_cuda failed with error code ", err);
}
template <typename T>
__device__ __forceinline__ T sum_strided(T val, T *temp, int stride, int len, int thread_id) {
if (stride >= len) {
return val;
}
// Warp reduction
for (int offset = warpSize / 2; offset >= stride; offset /= 2) {
val += __shfl_down_sync(FULL_MASK, val, offset);
}
// Block reduction
int block_reduction_stride = max(warpSize, stride);
int n_block_reductions = div_up(len, block_reduction_stride);
__syncthreads(); // Otherwise previous reads might be wrong
if (thread_id < len) {
temp[(thread_id % block_reduction_stride) * n_block_reductions + (thread_id / block_reduction_stride)] = val;
}
__syncthreads();
if (thread_id < n_block_reductions * stride) {
val = temp[thread_id];
for (int offset = n_block_reductions / 2; offset > 0; offset /= 2) {
val += __shfl_down_sync(FULL_MASK, val, offset);
}
}
return val;
}
template <typename scalar_t>
__global__ void butterfly_factor_multiply_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> grad_a,
const at::PackedTensorAccessor64<scalar_t, 3> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 3> input_a,
// at::PackedTensorAccessor64<scalar_t, 4> d_twiddle_expanded_a,
at::PackedTensorAccessor64<scalar_t, 3> d_twiddle_expanded_a,
at::PackedTensorAccessor64<scalar_t, 3> d_input_a) {
const int batch_size = input_a.size(0);
const int n = input_a.size(2);
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
const scalar_t twiddle_val[2][2] = {{twiddle_a[0][0][i], twiddle_a[0][1][i]},
{twiddle_a[1][0][i], twiddle_a[1][1][i]}};
scalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}};
const int b_start = blockIdx.y * blockDim.y + threadIdx.y;
// for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
for (int b = b_start; b < batch_size; b += blockDim.y * gridDim.y) {
const scalar_t input_val[2] = {input_a[b][0][i], input_a[b][1][i]};
const scalar_t grad_val[2] = {grad_a[b][0][i], grad_a[b][1][i]};
#pragma unroll
for (int j = 0; j <= 1; ++j) {
// d_twiddle_expanded_a[b][j][0][i] = grad_val[j] * input_val[0];
// d_twiddle_expanded_a[b][j][1][i] = grad_val[j] * input_val[1];
// atomicAdd(&d_twiddle_expanded_a[j][0][i], grad_val[j] * input_val[0]);
// atomicAdd(&d_twiddle_expanded_a[j][1][i], grad_val[j] * input_val[1]);
d_twiddle_val[j][0] += grad_val[j] * input_val[0];
d_twiddle_val[j][1] += grad_val[j] * input_val[1];
d_input_a[b][j][i] = twiddle_val[0][j] * grad_val[0] + twiddle_val[1][j] * grad_val[1];
}
}
// int tid = threadIdx.x + threadIdx.y * blockDim.x;
// int nthreads = blockDim.x * blockDim.y;
// __shared__ scalar_t temp_storage[MAX_BLOCK_SIZE];
// if (n < nthreads) {
// int lane = tid % warpSize;
// int wid = tid / warpSize;
// #pragma unroll
// for (int j = 0; j <= 1; ++j) {
// d_twiddle_val[j][0] = sum_strided(d_twiddle_val[j][0], temp_storage, n, nthreads, tid);
// d_twiddle_val[j][1] = sum_strided(d_twiddle_val[j][1], temp_storage, n, nthreads, tid);
// }
// int reduction_stride = max(warpSize, n);
// int n_block_reductions = div_up(nthreads, reduction_stride);
// if ((lane % n_block_reductions == 0) && (wid < n)) {
// #pragma unroll
// for (int j = 0; j <= 1; ++j) {
// atomicAdd(&d_twiddle_expanded_a[j][0][tid / n_block_reductions], d_twiddle_val[j][0]);
// atomicAdd(&d_twiddle_expanded_a[j][1][tid / n_block_reductions], d_twiddle_val[j][1]);
// }
// }
// } else {
// #pragma unroll
// for (int j = 0; j <= 1; ++j) {
// atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]);
// atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]);
// }
// }
// Warp reduction
for (int offset = warpSize / 2; offset >= n; offset /= 2) {
#pragma unroll
for (int j = 0; j <= 1; ++j) {
d_twiddle_val[j][0] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][0], offset);
d_twiddle_val[j][1] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][1], offset);
}
}
__shared__ scalar_t s_d_twiddle[MAX_BLOCK_SIZE * 4];
// // const scalar_t (*temp)[n] = (scalar_t (*)[n])(&s_d_twiddle[0]);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int nthreads = blockDim.x * blockDim.y;
int lane = tid % warpSize;
int wid = tid / warpSize;
if (n < nthreads) {
__syncthreads();
s_d_twiddle[tid] = 0;
s_d_twiddle[tid + MAX_BLOCK_SIZE] = 0;
s_d_twiddle[tid + 2 * MAX_BLOCK_SIZE] = 0;
s_d_twiddle[tid + 3 * MAX_BLOCK_SIZE] = 0;
__syncthreads();
if (lane < n) {
atomicAdd(&s_d_twiddle[i], d_twiddle_val[0][0]);
atomicAdd(&s_d_twiddle[i + MAX_BLOCK_SIZE], d_twiddle_val[0][1]);
atomicAdd(&s_d_twiddle[i + 2 * MAX_BLOCK_SIZE], d_twiddle_val[1][0]);
atomicAdd(&s_d_twiddle[i + 3 * MAX_BLOCK_SIZE], d_twiddle_val[1][1]);
}
__syncthreads();
if (tid < n) {
atomicAdd(&d_twiddle_expanded_a[0][0][i], s_d_twiddle[i]);
atomicAdd(&d_twiddle_expanded_a[0][1][i], s_d_twiddle[i + MAX_BLOCK_SIZE]);
atomicAdd(&d_twiddle_expanded_a[1][0][i], s_d_twiddle[i + 2 * MAX_BLOCK_SIZE]);
atomicAdd(&d_twiddle_expanded_a[1][1][i], s_d_twiddle[i + 3 * MAX_BLOCK_SIZE]);
}
} else {
#pragma unroll
for (int j = 0; j <= 1; ++j) {
atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]);
atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]);
}
}
// // Block reduction
// if (n < nthreads) {
// // if (n < 0) {
// int reduction_stride = max(warpSize, n);
// int n_block_reductions = div_up(nthreads, reduction_stride);
// if (lane < n) {
// // When filling in the shared memory, we assume that n is a power of 2,
// // otherwise we might have uninitialized values in the array.
// s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride)] = d_twiddle_val[0][0];
// s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride) + n * n_block_reductions] = d_twiddle_val[0][1];
// s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride) + 2 * n * n_block_reductions] = d_twiddle_val[1][0];
// s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride) + 3 * n * n_block_reductions] = d_twiddle_val[1][1];
// }
// __syncthreads();
// // if (tid == 0) {
// // for (int j = 0; j < 4 * n * n_block_reductions; ++j) {
// // printf("%i: %f\n", j, s_d_twiddle[j]);
// // }
// // }
// if (wid < n) {
// d_twiddle_val[0][0] = s_d_twiddle[tid];
// d_twiddle_val[0][1] = s_d_twiddle[tid + n * n_block_reductions];
// d_twiddle_val[1][0] = s_d_twiddle[tid + 2 * n * n_block_reductions];
// d_twiddle_val[1][1] = s_d_twiddle[tid + 3 * n * n_block_reductions];
// for (int offset = n_block_reductions / 2; offset > 0; offset /= 2) {
// #pragma unroll
// for (int j = 0; j <= 1; ++j) {
// d_twiddle_val[j][0] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][0], offset);
// d_twiddle_val[j][1] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][1], offset);
// }
// }
// if (lane % n_block_reductions == 0) {
// #pragma unroll
// for (int j = 0; j <= 1; ++j) {
// atomicAdd(&d_twiddle_expanded_a[j][0][tid / n_block_reductions], d_twiddle_val[j][0]);
// atomicAdd(&d_twiddle_expanded_a[j][1][tid / n_block_reductions], d_twiddle_val[j][1]);
// }
// }
// }
// // } else {
// } else if (lane < n) {
// #pragma unroll
// for (int j = 0; j <= 1; ++j) {
// atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]);
// atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]);
// }
// }
// if (lane < n) {
// #pragma unroll
// for (int j = 0; j <= 1; ++j) {
// atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]);
// atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]);
// }
// }
// #pragma unroll
// for (int j = 0; j <= 1; ++j) {
// d_twiddle_expanded_a[b_start][j][0][i] = d_twiddle_val[j][0];
// d_twiddle_expanded_a[b_start][j][1][i] = d_twiddle_val[j][1];
// }
}
}
template <typename scalar_t>
__global__ void butterfly_factor_multiply_complex_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> grad_a,
const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 4> input_a,
at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_expanded_a,
at::PackedTensorAccessor64<scalar_t, 4> d_input_a) {
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2);
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
const scalar_t twiddle_val[2][2][2] = {{{twiddle_a[0][0][i][0], twiddle_a[0][0][i][1]},
{twiddle_a[0][1][i][0], twiddle_a[0][1][i][1]}},
{{twiddle_a[1][0][i][0], twiddle_a[1][0][i][1]},
{twiddle_a[1][1][i][0], twiddle_a[1][1][i][1]}}};
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
const scalar_t input_val[2][2] = {{input_a[b][0][i][0], input_a[b][0][i][1]},
{input_a[b][1][i][0], input_a[b][1][i][1]}};
const scalar_t grad_val[2][2] = {{grad_a[b][0][i][0], grad_a[b][0][i][1]},
{grad_a[b][1][i][0], grad_a[b][1][i][1]}};
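      // The loop below applies the complex chain rule with the layout used throughout this file
      // (real/imaginary parts in the last dimension). Writing g_j = grad_val[j], x_k = input_val[k],
      // t_{jk} = twiddle_val[j][k] as complex numbers, the assignments compute
      //   d_twiddle[j][k] = g_j * conj(x_k)                  (expanded into real/imag components)
      //   d_input[j]      = conj(t_{0j}) * g_0 + conj(t_{1j}) * g_1
      // i.e. the twiddle gradient is the outer product grad * input^H and the input gradient is the
      // conjugate-transposed twiddle applied to grad.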
#pragma unroll
for (int j = 0; j <= 1; ++j) {
d_twiddle_expanded_a[b][j][0][i][0] = grad_val[j][0] * input_val[0][0] + grad_val[j][1] * input_val[0][1];
d_twiddle_expanded_a[b][j][0][i][1] = -grad_val[j][0] * input_val[0][1] + grad_val[j][1] * input_val[0][0];
d_twiddle_expanded_a[b][j][1][i][0] = grad_val[j][0] * input_val[1][0] + grad_val[j][1] * input_val[1][1];
d_twiddle_expanded_a[b][j][1][i][1] = -grad_val[j][0] * input_val[1][1] + grad_val[j][1] * input_val[1][0];
d_input_a[b][j][i][0] = twiddle_val[0][j][0] * grad_val[0][0] + twiddle_val[0][j][1] * grad_val[0][1]
+ twiddle_val[1][j][0] * grad_val[1][0] + twiddle_val[1][j][1] * grad_val[1][1];
d_input_a[b][j][i][1] = twiddle_val[0][j][0] * grad_val[0][1] - twiddle_val[0][j][1] * grad_val[0][0]
+ twiddle_val[1][j][0] * grad_val[1][1] - twiddle_val[1][j][1] * grad_val[1][0];
}
}
}
}
void butterfly_factor_multiply_backward_cuda(const at::Tensor& grad, const at::Tensor& twiddle, const at::Tensor& input,
at::Tensor& d_twiddle_expanded, at::Tensor& d_input) {
const auto batch_size = input.size(0);
const auto n = input.size(2);
dim3 block;
block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n);
block.y = div_up(MAX_BLOCK_SIZE, block.x);
dim3 grid(div_up(n, block.x), div_up(batch_size, block.y * WORK_PER_THREAD));
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_factor_multiply_backward_cuda", [&] {
switch (input.dim()) {
case 3: // real
{
const auto grad_a = grad.packed_accessor64<scalar_t, 3>();
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 3>();
const auto input_a = input.packed_accessor64<scalar_t, 3>();
// auto d_twiddle_expanded_a = d_twiddle_expanded.packed_accessor64<scalar_t, 4>();
auto d_twiddle_expanded_a = d_twiddle_expanded.packed_accessor64<scalar_t, 3>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 3>();
butterfly_factor_multiply_backward_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, twiddle_a, input_a, d_twiddle_expanded_a, d_input_a);
break;
}
case 4: // complex
{
const auto grad_a = grad.packed_accessor64<scalar_t, 4>();
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>();
const auto input_a = input.packed_accessor64<scalar_t, 4>();
auto d_twiddle_expanded_a = d_twiddle_expanded.packed_accessor64<scalar_t, 5>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 4>();
butterfly_factor_multiply_complex_backward_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, twiddle_a, input_a, d_twiddle_expanded_a, d_input_a);
break;
}
default:
        AT_ERROR("butterfly_factor_multiply_backward_cuda requires input dimension 3 or 4");
}
});
  // cudaGetLastError() clears the error state, so capture it once and reuse it in the message.
  const auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
              "butterfly_factor_multiply_backward_cuda failed with error code ",
              err);
}
template <int LENGTH, typename T>
__device__ __forceinline__ void sum_strided_atomic(T (&val)[LENGTH], T *storage, int stride, int nthreads, int tid) {
// Warp reduction
for (int offset = warpSize / 2; offset >= stride; offset /= 2) {
#pragma unroll
for (int j = 0; j < LENGTH; j++) {
val[j] += __shfl_down_sync(FULL_MASK, val[j], offset);
}
}
// Block reduction
__syncthreads(); // Need this, otherwise might overwrite before other threads can read twiddle values
if (tid < stride) {
#pragma unroll
for (int j = 0; j < LENGTH; j++) {
storage[j * stride + tid] = 0;
}
}
__syncthreads();
  int lane = tid & (warpSize - 1); // int lane = tid % warpSize;
if (lane < stride) {
#pragma unroll
for (int j = 0; j < LENGTH; j++) {
// atomicAdd(&storage[j * stride + tid % stride], val[j]);
atomicAdd(&storage[j * stride + (tid & (stride - 1))], val[j]);
}
}
__syncthreads();
}
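/* Illustrative usage sketch (an assumption, not part of the original API surface): a toy kernel
   showing how sum_strided_atomic is meant to be called. Every thread of the block must reach the
   call (it synchronizes the block), @stride must be a power of two, @storage needs LENGTH * stride
   elements, and the block size should be a multiple of warpSize for the full-mask shuffles.
   The kernel name and the in/out layouts below are hypothetical. */
template <typename scalar_t>
__global__ void sum_strided_atomic_example_kernel(const scalar_t* __restrict__ in,
                                                  scalar_t* __restrict__ out,
                                                  int stride) {
  __shared__ scalar_t storage[2 * MAX_BLOCK_SIZE];
  int tid = threadIdx.x;
  int nthreads = blockDim.x;
  // Two partial sums per thread; this thread contributes to column c = tid % stride.
  // (Input layout is hypothetical: 2 * blockDim.x values.)
  scalar_t val[2] = {in[tid], in[tid + nthreads]};
  sum_strided_atomic(val, storage, stride, nthreads, tid);
  // After the call, storage[j * stride + c] holds the block-wide sum of val[j] over all threads
  // with tid % stride == c; one thread per column writes the 2 * stride totals out.
  if (tid < stride) {
    out[tid] = storage[tid];
    out[stride + tid] = storage[stride + tid];
  }
}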
/* Sum, across the thread block, the values held by threads whose tids are congruent modulo
   @stride (= 1 << @log_stride), exchanging partial sums through shared memory (@storage).
   After the function, threads with @tid < n_block_reductions * stride and
   @tid % n_block_reductions == 0 hold the sums in @val.
 */
template <int LENGTH, typename T>
__device__ __forceinline__ void sum_strided_exchange(T (&val)[LENGTH], T *storage, int log_stride, int nthreads, int tid) {
int stride = 1 << log_stride;
// Warp reduction
for (int offset = warpSize / 2; offset >= stride; offset /= 2) {
#pragma unroll
for (int j = 0; j < LENGTH; j++) {
val[j] += __shfl_down_sync(FULL_MASK, val[j], offset);
}
}
int block_reduction_stride = max(warpSize, stride);
  // int n_block_reductions = div_up(nthreads, block_reduction_stride);
  // (the shift below is the same div_up, since block_reduction_stride == 1 << max(5, log_stride) when warpSize == 32)
  int n_block_reductions = (nthreads + block_reduction_stride - 1) >> max(5, log_stride);
int lane = tid % warpSize;
__syncthreads(); // Otherwise previous reads might be wrong
if ((tid < nthreads) && (lane < stride)) {
#pragma unroll
for (int j = 0; j < LENGTH; j++) {
// storage[j * nthreads + (tid % block_reduction_stride) * n_block_reductions + (tid / block_reduction_stride)] = val[j];
storage[j * nthreads + (tid & (block_reduction_stride - 1)) * n_block_reductions + (tid / block_reduction_stride)] = val[j];
}
}
__syncthreads();
if (tid < n_block_reductions * stride) {
#pragma unroll
for (int j = 0; j < LENGTH; j++) {
val[j] = storage[j * nthreads + tid];
}
for (int offset = n_block_reductions / 2; offset > 0; offset /= 2) {
#pragma unroll
for (int j = 0; j < LENGTH; j++) {
val[j] += __shfl_down_sync(FULL_MASK, val[j], offset);
}
}
}
}
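// Unlike sum_strided_atomic, which leaves the per-column sums in shared @storage, this variant
// leaves them in the registers (@val) of the threads described above. The backward kernels below
// use the atomic variant; the exchange-based call is kept commented out in
// butterfly_multiply_intermediate_backward_cuda_kernel as an alternative.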
template <typename scalar_t, bool increasing_stride, bool return_intermediates>
__global__ void butterfly_multiply_intermediate_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a,
at::PackedTensorAccessor64<scalar_t, 4> output_a,
int log_max_stride,
int log_n) {
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int max_stride = 1 << log_max_stride;
const int input_base_idx = blockIdx.x * blockDim.x * 2;
__shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2];
int b = blockIdx.y * blockDim.y + threadIdx.y;
if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock)
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
s_input[i] = output_a[first_idx][b][s][input_base_idx + i];
}
int i = threadIdx.x;
for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
if (i < stride) {
s_twiddle[i][0][0] = twiddle_a[s][twiddle_start_idx + i][0][0];
s_twiddle[i][0][1] = twiddle_a[s][twiddle_start_idx + i][0][1];
s_twiddle[i][1][0] = twiddle_a[s][twiddle_start_idx + i][1][0];
s_twiddle[i][1][1] = twiddle_a[s][twiddle_start_idx + i][1][1];
}
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int twiddle_idx = low_order_bits;
int pos = 2 * (i - low_order_bits) + low_order_bits;
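      // pos and pos + stride are the pair of elements this thread's 2x2 twiddle acts on:
      // clearing the low log_stride bits of i and doubling gives the base of the size-2*stride
      // block, and the low bits select the position within its first half.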
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]},
{s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}};
__syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read
const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]};
thrust::tie(s_input[pos], s_input[pos + stride]) = mult2x2(twiddle_val[0][0], twiddle_val[0][1], twiddle_val[1][0], twiddle_val[1][1],
input_val[0], input_val[1]);
if (return_intermediates || idx == first_idx + log_max_stride) {
output_a[idx+1][b][s][input_base_idx + pos] = s_input[pos];
output_a[idx+1][b][s][input_base_idx + pos + stride] = s_input[pos + stride];
}
}
}
}
template <typename scalar_t, bool increasing_stride, bool return_intermediates>
__global__ void butterfly_multiply_intermediate_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
at::PackedTensorAccessor64<scalar_t, 5> output_a,
int log_max_stride,
int log_n) {
using complex_t = thrust::complex<scalar_t>;
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int max_stride = 1 << log_max_stride;
const int input_base_idx = blockIdx.x * blockDim.x * 2;
// __shared__ complex_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_input_storage[ELEMENTARY_SIZE * 2][2];
complex_t* s_input = (complex_t *)&s_input_storage[0]; // To avoid warning about race-condition when initializing complex_t
// __shared__ complex_t s_twiddle[ELEMENTARY_SIZE][2][2];
__shared__ scalar_t s_twiddle_storage[ELEMENTARY_SIZE][2][2][2];
complex_t (* s_twiddle)[2][2] = (complex_t (*)[2][2])&s_twiddle_storage[0]; // To avoid warning about race-condition when initializing complex_t
int b = blockIdx.y * blockDim.y + threadIdx.y;
if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock)
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
s_input[i] = complex_t(output_a[first_idx][b][s][input_base_idx + i][0], output_a[first_idx][b][s][input_base_idx + i][1]);
}
int i = threadIdx.x;
for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
if (i < stride) {
s_twiddle[i][0][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][0][0], twiddle_a[s][twiddle_start_idx + i][0][0][1]);
s_twiddle[i][0][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][1][0], twiddle_a[s][twiddle_start_idx + i][0][1][1]);
s_twiddle[i][1][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][0][0], twiddle_a[s][twiddle_start_idx + i][1][0][1]);
s_twiddle[i][1][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][1][0], twiddle_a[s][twiddle_start_idx + i][1][1][1]);
}
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int twiddle_idx = low_order_bits;
int pos = 2 * (i - low_order_bits) + low_order_bits;
__syncthreads();
const complex_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]},
{s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}};
__syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read
const complex_t input_val[2] = {s_input[pos], s_input[pos + stride]};
thrust::tie(s_input[pos], s_input[pos + stride]) = mult2x2(twiddle_val[0][0], twiddle_val[0][1], twiddle_val[1][0], twiddle_val[1][1],
input_val[0], input_val[1]);
if (return_intermediates || idx == first_idx + log_max_stride) {
output_a[idx+1][b][s][input_base_idx + pos][0] = s_input[pos].real();
output_a[idx+1][b][s][input_base_idx + pos][1] = s_input[pos].imag();
output_a[idx+1][b][s][input_base_idx + pos + stride][0] = s_input[pos + stride].real();
output_a[idx+1][b][s][input_base_idx + pos + stride][1] = s_input[pos + stride].imag();
}
}
}
}
template <typename scalar_t, bool increasing_stride>
__global__ void butterfly_multiply_intermediate_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a,
at::PackedTensorAccessor64<scalar_t, 4> output_a,
int log_stride,
int log_n) {
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a
const int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int twiddle_idx = twiddle_start_idx + low_order_bits;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const scalar_t twiddle_val[2][2] = {{twiddle_a[s][twiddle_idx][0][0], twiddle_a[s][twiddle_idx][0][1]},
{twiddle_a[s][twiddle_idx][1][0], twiddle_a[s][twiddle_idx][1][1]}};
for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]};
output_a[idx+1][b][s][pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1];
output_a[idx+1][b][s][pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1];
}
}
template <typename scalar_t, bool increasing_stride>
__global__ void butterfly_multiply_intermediate_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
at::PackedTensorAccessor64<scalar_t, 5> output_a,
int log_stride,
int log_n) {
using complex_t = thrust::complex<scalar_t>;
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a
const int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int twiddle_idx = twiddle_start_idx + low_order_bits;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const complex_t twiddle_val[2][2] =
{{complex_t(twiddle_a[s][twiddle_idx][0][0][0], twiddle_a[s][twiddle_idx][0][0][1]),
complex_t(twiddle_a[s][twiddle_idx][0][1][0], twiddle_a[s][twiddle_idx][0][1][1])},
{complex_t(twiddle_a[s][twiddle_idx][1][0][0], twiddle_a[s][twiddle_idx][1][0][1]),
complex_t(twiddle_a[s][twiddle_idx][1][1][0], twiddle_a[s][twiddle_idx][1][1][1])}};
for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
const complex_t input_val[2] =
{complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]),
complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])};
const complex_t output_val[2] =
{twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1],
twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]};
output_a[idx+1][b][s][pos][0] = output_val[0].real();
output_a[idx+1][b][s][pos][1] = output_val[0].imag();
output_a[idx+1][b][s][pos + stride][0] = output_val[1].real();
output_a[idx+1][b][s][pos + stride][1] = output_val[1].imag();
}
}
void butterfly_multiply_intermediate_cuda(const at::Tensor& twiddle, at::Tensor& output, bool increasing_stride, bool return_intermediates) {
const int batch_size = output.size(1);
const int nstack = twiddle.size(0);
const int n = output.size(3);
const int log_n = int(log2((double) n));
const bool complex = output.dim() == 5;
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_intermediate_cuda", [&] {
if (!complex) { // real
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>();
auto output_a = output.packed_accessor64<scalar_t, 4>();
if (increasing_stride) {
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride);
dim3 grid(div_up(n / 2, stride), batch_size, nstack);
return_intermediates ? butterfly_multiply_intermediate_cuda_kernel<scalar_t, true, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n)
: butterfly_multiply_intermediate_cuda_kernel<scalar_t, true, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
for (log_stride++; log_stride <= log_n - 1; ++log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack);
butterfly_multiply_intermediate_onestep_cuda_kernel<scalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
} else {
int log_stride = log_n - 1;
for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack);
butterfly_multiply_intermediate_onestep_cuda_kernel<scalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
int stride = 1 << log_stride;
dim3 block(stride);
dim3 grid(div_up(n / 2, stride), batch_size, nstack);
return_intermediates ? butterfly_multiply_intermediate_cuda_kernel<scalar_t, false, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n)
: butterfly_multiply_intermediate_cuda_kernel<scalar_t, false, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
} else { // complex
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>();
auto output_a = output.packed_accessor64<scalar_t, 5>();
if (increasing_stride) {
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride);
dim3 grid(div_up(n / 2, stride), batch_size, nstack);
return_intermediates ? butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, true, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n)
: butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, true, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
for (log_stride++; log_stride <= log_n - 1; ++log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack);
butterfly_multiply_intermediate_onestep_complex_cuda_kernel<scalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
} else {
int log_stride = log_n - 1;
for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack);
butterfly_multiply_intermediate_onestep_complex_cuda_kernel<scalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
int stride = 1 << log_stride;
dim3 block(stride);
dim3 grid(div_up(n / 2, stride), batch_size, nstack);
return_intermediates ? butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, false, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n)
: butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, false, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
}
});
  const auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
              "butterfly_multiply_intermediate_cuda failed with error code ",
              err);
}
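/* Shape sketch, inferred from the accessor indexing above (an assumption, not a checked contract):
     real:    twiddle (nstack, n - 1, 2, 2)        output (log_n + 1, batch_size, nstack, n)
     complex: twiddle (nstack, n - 1, 2, 2, 2)     output (log_n + 1, batch_size, nstack, n, 2)
   output[0] is expected to hold the input; output[idx + 1] receives the result of butterfly
   level idx. A hypothetical host-side call (tensor names are illustrative):
     auto output = at::empty({log_n + 1, batch_size, nstack, n}, input.options());
     output[0].copy_(input);
     butterfly_multiply_intermediate_cuda(twiddle, output, true, true);
 */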
template <typename scalar_t, typename accscalar_t, bool increasing_stride>
__global__ void butterfly_multiply_intermediate_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 4> output_a,
at::PackedTensorAccessor64<scalar_t, 4> d_twiddle_a,
at::PackedTensorAccessor64<scalar_t, 3> d_input_a,
int log_max_stride,
int log_n) {
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int max_stride = 1 << log_max_stride;
const int input_base_idx = blockIdx.x * blockDim.x * 2;
__shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2];
__shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle
// __shared__ scalar_t s_d_twiddle[ELEMENTARY_SIZE * 4];
  // accscalar_t (* s_d_twiddle)[2][2] = (accscalar_t (*)[2][2])&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implementation.
  accscalar_t* s_d_twiddle = (accscalar_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implementation.
int b = blockIdx.y * blockDim.y + threadIdx.y;
if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock)
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
s_grad[i] = d_input_a[b][s][input_base_idx + i];
}
int i = threadIdx.x;
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
if (i < stride) {
s_twiddle[i][0][0] = twiddle_a[s][twiddle_start_idx + i][0][0];
s_twiddle[i][0][1] = twiddle_a[s][twiddle_start_idx + i][0][1];
s_twiddle[i][1][0] = twiddle_a[s][twiddle_start_idx + i][1][0];
s_twiddle[i][1][1] = twiddle_a[s][twiddle_start_idx + i][1][1];
}
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int twiddle_idx = low_order_bits;
int pos = 2 * (i - low_order_bits) + low_order_bits;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]},
{s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}};
// Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then
const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
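      // Backpropagate through the 2x2 butterfly: the gradient w.r.t. the inputs is B^T * grad
      // (written out below), and the gradient w.r.t. the twiddle is the outer product
      // grad * input^T, accumulated over the batch via sum_strided_atomic + atomicAdd.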
s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1];
s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1];
const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos], output_a[idx][b][s][input_base_idx + pos + stride]};
accscalar_t d_twiddle_val[2][2] = {{grad_val[0] * input_val[0], grad_val[0] * input_val[1]},
{grad_val[1] * input_val[0], grad_val[1] * input_val[1]}};
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int nthreads = blockDim.x * blockDim.y;
sum_strided_atomic(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, stride, nthreads, tid);
if (tid < stride) {
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][0], s_d_twiddle[twiddle_idx]);
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][1], s_d_twiddle[twiddle_idx + stride]);
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][0], s_d_twiddle[twiddle_idx + 2 * stride]);
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][1], s_d_twiddle[twiddle_idx + 3 * stride]);
}
__syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read
// sum_strided_exchange(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, log_stride, nthreads, tid);
// int block_reduction_stride = max(warpSize, stride);
// // int n_block_reductions = div_up(nthreads, block_reduction_stride);
// int n_block_reductions = (nthreads + block_reduction_stride - 1) >> max(5, log_stride);
// // if ((tid < n_block_reductions * stride) && (tid % n_block_reductions == 0)) {
// if ((tid < n_block_reductions * stride) && ((tid & (n_block_reductions - 1)) == 0)) {
// // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + tid / n_block_reductions][0][0], d_twiddle_val[0][0]);
// // Trying to avoid integer division
// int log_n_block_reductions = log_max_stride - max(5, log_stride); // Use the fact that nthreads == max_stride and warpSize == 32
// atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][0][0], d_twiddle_val[0][0]);
// atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][0][1], d_twiddle_val[0][1]);
// atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][1][0], d_twiddle_val[1][0]);
// atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][1][1], d_twiddle_val[1][1]);
// }
}
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
d_input_a[b][s][input_base_idx + i] = s_grad[i];
}
}
}
template <typename scalar_t, typename accscalar_t, bool increasing_stride>
__global__ void butterfly_multiply_intermediate_backward_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 5> output_a,
at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a,
at::PackedTensorAccessor64<scalar_t, 4> d_input_a,
int log_max_stride,
int log_n) {
using complex_t = thrust::complex<scalar_t>;
using acccomplex_t = thrust::complex<accscalar_t>;
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int max_stride = 1 << log_max_stride;
const int input_base_idx = blockIdx.x * blockDim.x * 2;
// __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2][2];
__shared__ scalar_t s_grad_storage[ELEMENTARY_SIZE * 2][2];
complex_t* s_grad = (complex_t *)&s_grad_storage[0]; // To avoid warning about race-condition when initializing complex_t
// __shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle
__shared__ accscalar_t s_twiddle_storage[ELEMENTARY_SIZE][2][2][2];
acccomplex_t (* s_twiddle)[2][2] = (acccomplex_t (*)[2][2])&s_twiddle_storage[0]; // To avoid warning about race-condition when initializing complex_t
// __shared__ scalar_t s_d_twiddle[ELEMENTARY_SIZE * 4];
  // acccomplex_t (* s_d_twiddle)[2][2] = (acccomplex_t (*)[2][2])&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implementation.
  acccomplex_t* s_d_twiddle = (acccomplex_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implementation.
int b = blockIdx.y * blockDim.y + threadIdx.y;
if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock)
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
s_grad[i] = complex_t(d_input_a[b][s][input_base_idx + i][0], d_input_a[b][s][input_base_idx + i][1]);
}
int i = threadIdx.x;
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
if (i < stride) {
s_twiddle[i][0][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][0][0], twiddle_a[s][twiddle_start_idx + i][0][0][1]);
s_twiddle[i][0][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][1][0], twiddle_a[s][twiddle_start_idx + i][0][1][1]);
s_twiddle[i][1][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][0][0], twiddle_a[s][twiddle_start_idx + i][1][0][1]);
s_twiddle[i][1][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][1][0], twiddle_a[s][twiddle_start_idx + i][1][1][1]);
}
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int twiddle_idx = low_order_bits;
int pos = 2 * (i - low_order_bits) + low_order_bits;
__syncthreads();
const complex_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]},
{s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}};
// Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then
const complex_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
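      // Complex case: d_input = conj(B)^T * grad and d_twiddle = grad * conj(input)^T,
      // matching the convention of the complex backward kernels above.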
s_grad[pos] = thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1];
s_grad[pos + stride] = thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1];
const complex_t input_val[2] =
{complex_t(output_a[idx][b][s][input_base_idx + pos][0], output_a[idx][b][s][input_base_idx + pos][1]),
complex_t(output_a[idx][b][s][input_base_idx + pos + stride][0], output_a[idx][b][s][input_base_idx + pos + stride][1])};
acccomplex_t d_twiddle_val[2][2] =
{{grad_val[0] * thrust::conj(input_val[0]), grad_val[0] * thrust::conj(input_val[1])},
{grad_val[1] * thrust::conj(input_val[0]), grad_val[1] * thrust::conj(input_val[1])}};
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int nthreads = blockDim.x * blockDim.y;
sum_strided_atomic(reinterpret_cast<acccomplex_t (&)[4]>(d_twiddle_val), s_d_twiddle, stride, nthreads, tid);
if (tid < stride) {
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][0][0], s_d_twiddle[twiddle_idx].real());
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][0][1], s_d_twiddle[twiddle_idx].imag());
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][1][0], s_d_twiddle[twiddle_idx + stride].real());
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][1][1], s_d_twiddle[twiddle_idx + stride].imag());
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][0][0], s_d_twiddle[twiddle_idx + 2 * stride].real());
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][0][1], s_d_twiddle[twiddle_idx + 2 * stride].imag());
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][1][0], s_d_twiddle[twiddle_idx + 3 * stride].real());
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][1][1], s_d_twiddle[twiddle_idx + 3 * stride].imag());
}
__syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read
}
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
d_input_a[b][s][input_base_idx + i][0] = s_grad[i].real();
d_input_a[b][s][input_base_idx + i][1] = s_grad[i].imag();
}
}
}
template <typename scalar_t, typename accscalar_t, bool increasing_stride>
__global__ void butterfly_multiply_intermediate_backward_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 4> output_a,
at::PackedTensorAccessor64<scalar_t, 4> d_twiddle_a,
at::PackedTensorAccessor64<scalar_t, 3> d_input_a,
int log_stride,
int log_n) {
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a
const int n = output_a.size(3);
int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n / 2) return;  // i indexes butterfly pairs, of which there are n / 2
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int twiddle_idx = twiddle_start_idx + low_order_bits;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const scalar_t twiddle_val[2][2] = {{twiddle_a[s][twiddle_idx][0][0], twiddle_a[s][twiddle_idx][0][1]},
{twiddle_a[s][twiddle_idx][1][0], twiddle_a[s][twiddle_idx][1][1]}};
accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}};
for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
const scalar_t grad_val[2] = {d_input_a[b][s][pos], d_input_a[b][s][pos + stride]};
d_input_a[b][s][pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1];
d_input_a[b][s][pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1];
const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]};
d_twiddle_val[0][0] += grad_val[0] * input_val[0];
d_twiddle_val[0][1] += grad_val[0] * input_val[1];
d_twiddle_val[1][0] += grad_val[1] * input_val[0];
d_twiddle_val[1][1] += grad_val[1] * input_val[1];
}
atomicAdd(&d_twiddle_a[s][twiddle_idx][0][0], d_twiddle_val[0][0]);
atomicAdd(&d_twiddle_a[s][twiddle_idx][0][1], d_twiddle_val[0][1]);
atomicAdd(&d_twiddle_a[s][twiddle_idx][1][0], d_twiddle_val[1][0]);
atomicAdd(&d_twiddle_a[s][twiddle_idx][1][1], d_twiddle_val[1][1]);
}
template <typename scalar_t, typename accscalar_t, bool increasing_stride>
__global__ void butterfly_multiply_intermediate_backward_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 5> output_a,
at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a,
at::PackedTensorAccessor64<scalar_t, 4> d_input_a,
int log_stride,
int log_n) {
using complex_t = thrust::complex<scalar_t>;
using acccomplex_t = thrust::complex<accscalar_t>;
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a
const int n = output_a.size(3);
int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n / 2) return;  // i indexes butterfly pairs, of which there are n / 2
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int twiddle_idx = twiddle_start_idx + low_order_bits;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const complex_t twiddle_val[2][2] =
{{complex_t(twiddle_a[s][twiddle_idx][0][0][0], twiddle_a[s][twiddle_idx][0][0][1]),
complex_t(twiddle_a[s][twiddle_idx][0][1][0], twiddle_a[s][twiddle_idx][0][1][1])},
{complex_t(twiddle_a[s][twiddle_idx][1][0][0], twiddle_a[s][twiddle_idx][1][0][1]),
complex_t(twiddle_a[s][twiddle_idx][1][1][0], twiddle_a[s][twiddle_idx][1][1][1])}};
acccomplex_t d_twiddle_val[2][2] = {{{0, 0}, {0, 0}}, {{0, 0}, {0, 0}}};
for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
const complex_t grad_val[2] = {complex_t(d_input_a[b][s][pos][0], d_input_a[b][s][pos][1]),
complex_t(d_input_a[b][s][pos + stride][0], d_input_a[b][s][pos + stride][1])};
const complex_t d_input_val[2] =
{thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1],
thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]};
d_input_a[b][s][pos][0] = d_input_val[0].real();
d_input_a[b][s][pos][1] = d_input_val[0].imag();
d_input_a[b][s][pos + stride][0] = d_input_val[1].real();
d_input_a[b][s][pos + stride][1] = d_input_val[1].imag();
const complex_t input_val[2] =
{complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]),
complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])};
d_twiddle_val[0][0] += grad_val[0] * thrust::conj(input_val[0]);
d_twiddle_val[0][1] += grad_val[0] * thrust::conj(input_val[1]);
d_twiddle_val[1][0] += grad_val[1] * thrust::conj(input_val[0]);
d_twiddle_val[1][1] += grad_val[1] * thrust::conj(input_val[1]);
}
atomicAdd(&d_twiddle_a[s][twiddle_idx][0][0][0], d_twiddle_val[0][0].real());
atomicAdd(&d_twiddle_a[s][twiddle_idx][0][0][1], d_twiddle_val[0][0].imag());
atomicAdd(&d_twiddle_a[s][twiddle_idx][0][1][0], d_twiddle_val[0][1].real());
atomicAdd(&d_twiddle_a[s][twiddle_idx][0][1][1], d_twiddle_val[0][1].imag());
atomicAdd(&d_twiddle_a[s][twiddle_idx][1][0][0], d_twiddle_val[1][0].real());
atomicAdd(&d_twiddle_a[s][twiddle_idx][1][0][1], d_twiddle_val[1][0].imag());
atomicAdd(&d_twiddle_a[s][twiddle_idx][1][1][0], d_twiddle_val[1][1].real());
atomicAdd(&d_twiddle_a[s][twiddle_idx][1][1][1], d_twiddle_val[1][1].imag());
}
void butterfly_multiply_intermediate_backward_cuda(const at::Tensor& twiddle, const at::Tensor& output,
at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) {
const int batch_size = output.size(1);
const int nstack = output.size(2);
const int n = output.size(3);
const int log_n = int(log2((double) n));
const bool complex = output.dim() == 5;
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_intermediate_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (!complex) { // real
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>();
const auto output_a = output.packed_accessor64<scalar_t, 4>();
auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 4>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 3>();
if (increasing_stride) {
int log_stride = log_n - 1;
for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack);
butterfly_multiply_intermediate_backward_onestep_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
}
int stride = 1 << log_stride;
dim3 block(stride);
dim3 grid(div_up(n / 2, stride), batch_size, nstack);
butterfly_multiply_intermediate_backward_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
} else {
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride);
dim3 grid(div_up(n / 2, stride), batch_size, nstack);
butterfly_multiply_intermediate_backward_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
for (log_stride++; log_stride <= log_n - 1; ++log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack);
butterfly_multiply_intermediate_backward_onestep_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
}
}
} else { // complex
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>();
const auto output_a = output.packed_accessor64<scalar_t, 5>();
auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 5>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 4>();
if (increasing_stride) {
int log_stride = log_n - 1;
for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack);
butterfly_multiply_intermediate_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
}
int stride = 1 << log_stride;
dim3 block(stride);
dim3 grid(div_up(n / 2, stride), batch_size, nstack);
butterfly_multiply_intermediate_backward_complex_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
} else {
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride);
dim3 grid(div_up(n / 2, stride), batch_size, nstack);
butterfly_multiply_intermediate_backward_complex_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
for (log_stride++; log_stride <= log_n - 1; ++log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack);
butterfly_multiply_intermediate_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
}
}
}
});
  const auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
              "butterfly_multiply_intermediate_backward_cuda failed with error code ",
              err);
}
template <typename scalar_t, bool increasing_stride, bool return_intermediates>
__global__ void butterfly_multiply_untied_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
at::PackedTensorAccessor64<scalar_t, 4> output_a,
int log_max_stride,
int log_n) {
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int max_stride = 1 << log_max_stride;
const int input_base_idx = blockIdx.y * blockDim.x * 2;
__shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2];
int b = blockIdx.x * blockDim.y + threadIdx.y;
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
if (b < batch_size) {
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
s_input[i + threadIdx.y * max_stride * 2] = output_a[first_idx][b][s][input_base_idx + i];
}
}
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
if (tid_y == 0) {
s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0];
s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1];
s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0];
s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]},
{s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}};
__syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read
if (b < batch_size) {
const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]};
s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1];
s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1];
if (return_intermediates || idx == first_idx + log_max_stride) {
output_a[idx+1][b][s][input_base_idx + pos_x] = s_input[pos];
output_a[idx+1][b][s][input_base_idx + pos_x + stride] = s_input[pos + stride];
}
}
}
}
// Trying out an implementation where consecutive threads process same input index, but different batch indices.
// template <typename scalar_t, bool increasing_stride, bool return_intermediates>
// __global__ void butterfly_multiply_untied_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
// at::PackedTensorAccessor64<scalar_t, 4> output_a,
// int log_max_stride,
// int log_n) {
// const int batch_size = output_a.size(1);
// const int s = blockIdx.z;
// const int max_stride = 1 << log_max_stride;
// const int input_base_idx = blockIdx.y * blockDim.y * 2;
// __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
// __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2];
// int b = blockIdx.x * blockDim.x + threadIdx.x;
// int tid_x = threadIdx.x; // batch index
// int tid_y = threadIdx.y;
// int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
// if (b < batch_size) {
// for (int i = tid_y; i < max_stride * 2; i += blockDim.y) {
// s_input[tid_x + i * blockDim.x] = output_a[first_idx][b][s][input_base_idx + i];
// }
// }
// // for (int i = tid_x + tid_y * blockDim.x; i < blockDim.x * max_stride * 2; i += blockDim.x * blockDim.y) {
// // int input_idx = i & (max_stride * 2 - 1); // int input_idx = i % (max_stride * 2);
// // int batch_idx = i >> (log_max_stride + 1); // int batch_idx = (i - input_idx) / (max_stride * 2);
// // if (blockIdx.x * blockDim.x + batch_idx < batch_size) {
// // s_input[batch_idx + input_idx * blockDim.x] = output_a[blockIdx.x * blockDim.x + first_idx][batch_idx][s][input_base_idx + input_idx];
// // }
// // }
// for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) {
// int log_stride = increasing_stride ? idx : log_n - 1 - idx;
// int stride = 1 << log_stride;
// if (tid_x == 0) {
// s_twiddle[tid_y][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][0][0];
// s_twiddle[tid_y][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][0][1];
// s_twiddle[tid_y][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][1][0];
// s_twiddle[tid_y][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][1][1];
// }
// int low_order_bits = tid_y & (stride - 1); // int low_order_bits = tid_y % stride;
// int pos_y = 2 * (tid_y - low_order_bits) + low_order_bits;
// int pos_x = tid_x;
// int pos = pos_x + pos_y * blockDim.x;
// __syncthreads();
// const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_y][0][0], s_twiddle[tid_y][0][1]},
// {s_twiddle[tid_y][1][0], s_twiddle[tid_y][1][1]}};
// __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read
// if (b < batch_size) {
// const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride * blockDim.x]};
// s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1];
// s_input[pos + stride * blockDim.x] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1];
// if (return_intermediates || idx == first_idx + log_max_stride) {
// output_a[idx+1][b][s][input_base_idx + pos_y] = s_input[pos];
// output_a[idx+1][b][s][input_base_idx + pos_y + stride] = s_input[pos + stride * blockDim.x];
// }
// }
// }
// }
template <typename scalar_t, bool increasing_stride, bool return_intermediates>
__global__ void butterfly_multiply_untied_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a,
at::PackedTensorAccessor64<scalar_t, 5> output_a,
int log_max_stride,
int log_n) {
using complex_t = thrust::complex<scalar_t>;
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int max_stride = 1 << log_max_stride;
const int input_base_idx = blockIdx.y * blockDim.x * 2;
// __shared__ complex_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_input_storage[ELEMENTARY_SIZE * 2][2];
complex_t* s_input = (complex_t *)&s_input_storage[0]; // To avoid warning about race-condition when initializing complex_t
int b = blockIdx.x * blockDim.y + threadIdx.y;
if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock)
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
s_input[i] = complex_t(output_a[first_idx][b][s][input_base_idx + i][0], output_a[first_idx][b][s][input_base_idx + i][1]);
}
int i = threadIdx.x;
for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const complex_t twiddle_val[2][2] =
{{complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][1]),
complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][1])},
{complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][1]),
complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][1])}};
__syncthreads();
const complex_t input_val[2] = {s_input[pos], s_input[pos + stride]};
s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1];
s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1];
if (return_intermediates || idx == first_idx + log_max_stride) {
output_a[idx+1][b][s][input_base_idx + pos][0] = s_input[pos].real();
output_a[idx+1][b][s][input_base_idx + pos][1] = s_input[pos].imag();
output_a[idx+1][b][s][input_base_idx + pos + stride][0] = s_input[pos + stride].real();
output_a[idx+1][b][s][input_base_idx + pos + stride][1] = s_input[pos + stride].imag();
}
}
}
}
template <typename scalar_t, bool increasing_stride>
__global__ void butterfly_multiply_untied_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
at::PackedTensorAccessor64<scalar_t, 4> output_a,
int log_stride,
int log_n) {
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a
const int stride = 1 << log_stride;
int i = blockIdx.y * blockDim.x + threadIdx.x;
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const scalar_t twiddle_val[2][2] = {{twiddle_a[s][log_stride][i][0][0], twiddle_a[s][log_stride][i][0][1]},
{twiddle_a[s][log_stride][i][1][0], twiddle_a[s][log_stride][i][1][1]}};
for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) {
const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]};
output_a[idx+1][b][s][pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1];
output_a[idx+1][b][s][pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1];
}
}
template <typename scalar_t, bool increasing_stride>
__global__ void butterfly_multiply_untied_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a,
at::PackedTensorAccessor64<scalar_t, 5> output_a,
int log_stride,
int log_n) {
using complex_t = thrust::complex<scalar_t>;
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a
const int stride = 1 << log_stride;
int i = blockIdx.y * blockDim.x + threadIdx.x;
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const complex_t twiddle_val[2][2] =
{{complex_t(twiddle_a[s][log_stride][i][0][0][0], twiddle_a[s][log_stride][i][0][0][1]),
complex_t(twiddle_a[s][log_stride][i][0][1][0], twiddle_a[s][log_stride][i][0][1][1])},
{complex_t(twiddle_a[s][log_stride][i][1][0][0], twiddle_a[s][log_stride][i][1][0][1]),
complex_t(twiddle_a[s][log_stride][i][1][1][0], twiddle_a[s][log_stride][i][1][1][1])}};
for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) {
const complex_t input_val[2] =
{complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]),
complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])};
const complex_t output_val[2] =
{twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1],
twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]};
output_a[idx+1][b][s][pos][0] = output_val[0].real();
output_a[idx+1][b][s][pos][1] = output_val[0].imag();
output_a[idx+1][b][s][pos + stride][0] = output_val[1].real();
output_a[idx+1][b][s][pos + stride][1] = output_val[1].imag();
}
}
void butterfly_multiply_untied_cuda(const at::Tensor& twiddle, at::Tensor& output, bool increasing_stride, bool return_intermediates) {
const int batch_size = output.size(1);
const int nstack = twiddle.size(0);
const int n = output.size(3);
const int log_n = int(log2((double) n));
const bool complex = output.dim() == 5;
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_untied_cuda", [&] {
if (!complex) { // real
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>();
auto output_a = output.packed_accessor64<scalar_t, 4>();
if (increasing_stride) {
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack);
// dim3 block(div_up(MAX_BLOCK_SIZE, stride * 2), stride);
// dim3 grid(div_up(batch_size, block.x), div_up(n / 2, stride), nstack);
return_intermediates ? butterfly_multiply_untied_cuda_kernel<scalar_t, true, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n)
: butterfly_multiply_untied_cuda_kernel<scalar_t, true, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
for (log_stride++; log_stride <= log_n - 1; ++log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack);
butterfly_multiply_untied_onestep_cuda_kernel<scalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
} else {
int log_stride = log_n - 1;
for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack);
butterfly_multiply_untied_onestep_cuda_kernel<scalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
int stride = 1 << log_stride;
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack);
// dim3 block(div_up(MAX_BLOCK_SIZE, stride * 2), stride);
// dim3 grid(div_up(batch_size, block.x), div_up(n / 2, stride), nstack);
return_intermediates ? butterfly_multiply_untied_cuda_kernel<scalar_t, false, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n)
: butterfly_multiply_untied_cuda_kernel<scalar_t, false, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
} else { // complex
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 6>();
auto output_a = output.packed_accessor64<scalar_t, 5>();
if (increasing_stride) {
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride);
dim3 grid(batch_size, div_up(n / 2, stride), nstack);
return_intermediates ? butterfly_multiply_untied_complex_cuda_kernel<scalar_t, true, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n)
: butterfly_multiply_untied_complex_cuda_kernel<scalar_t, true, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
for (log_stride++; log_stride <= log_n - 1; ++log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack);
butterfly_multiply_untied_onestep_complex_cuda_kernel<scalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
} else {
int log_stride = log_n - 1;
for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack);
butterfly_multiply_untied_onestep_complex_cuda_kernel<scalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
int stride = 1 << log_stride;
dim3 block(stride);
dim3 grid(batch_size, div_up(n / 2, stride), nstack);
return_intermediates ? butterfly_multiply_untied_complex_cuda_kernel<scalar_t, false, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n)
: butterfly_multiply_untied_complex_cuda_kernel<scalar_t, false, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n);
}
}
});
  auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
             "butterfly_multiply_untied_cuda failed with error: ",
             cudaGetErrorString(err));
}
// Original implementation, with 1 batch per thread block
// template <typename scalar_t, typename accscalar_t, bool increasing_stride>
// __global__ void butterfly_multiply_untied_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
// const at::PackedTensorAccessor64<scalar_t, 4> output_a,
// at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a,
// at::PackedTensorAccessor64<scalar_t, 3> d_input_a,
// int log_max_stride,
// int log_n) {
// const int batch_size = output_a.size(1);
// const int s = blockIdx.z;
// const int max_stride = 1 << log_max_stride;
// const int input_base_idx = blockIdx.y * blockDim.x * 2;
// __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2];
// int b = blockIdx.x * blockDim.y + threadIdx.y;
// if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock)
// for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
// s_grad[i] = d_input_a[b][s][input_base_idx + i];
// }
// int i = threadIdx.x;
// int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
// for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) {
// int log_stride = increasing_stride ? idx : log_n - 1 - idx;
// int stride = 1 << log_stride;
// int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
// int pos = 2 * (i - low_order_bits) + low_order_bits;
// const scalar_t twiddle_val[2][2] = {{twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1]},
// {twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1]}};
// __syncthreads();
// const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
// s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1];
// s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1];
// const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos], output_a[idx][b][s][input_base_idx + pos + stride]};
// accscalar_t d_twiddle_val[2][2] = {{grad_val[0] * input_val[0], grad_val[0] * input_val[1]},
// {grad_val[1] * input_val[0], grad_val[1] * input_val[1]}};
// atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0], d_twiddle_val[0][0]);
// atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1], d_twiddle_val[0][1]);
// atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0], d_twiddle_val[1][0]);
// atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1], d_twiddle_val[1][1]);
// }
// __syncthreads();
// for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
// d_input_a[b][s][input_base_idx + i] = s_grad[i];
// }
// }
// }
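// Backward pass of the untied butterfly multiply (real case). Unlike the original
// implementation above, each thread block handles blockDim.y batch elements: the
// twiddle factors of the current stride are staged in s_twiddle, and the per-batch
// twiddle gradients are reduced across the block with sum_strided_atomic (reusing
// the s_twiddle storage as s_d_twiddle) before a single atomicAdd per entry.
// For each 2x2 factor y = W x the loop body computes dL/dx = W^T dL/dy and
// dL/dW = dL/dy * x^T, reading x from the forward intermediates stored in output_a.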
template <typename scalar_t, typename accscalar_t, bool increasing_stride>
__global__ void butterfly_multiply_untied_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 4> output_a,
at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a,
at::PackedTensorAccessor64<scalar_t, 3> d_input_a,
int log_max_stride,
int log_n) {
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int max_stride = 1 << log_max_stride;
const int input_base_idx = blockIdx.y * blockDim.x * 2;
__shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2];
__shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle
  accscalar_t* s_d_twiddle = (accscalar_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle; be careful if the implementation changes.
int b = blockIdx.x * blockDim.y + threadIdx.y;
if (b < batch_size) {
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
s_grad[i + threadIdx.y * max_stride * 2] = d_input_a[b][s][input_base_idx + i];
}
}
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
if (tid_y == 0) {
s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0];
s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1];
s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0];
s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]},
{s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}};
// Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then
accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}};
if (b < batch_size) {
const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1];
s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1];
const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos_x],
output_a[idx][b][s][input_base_idx + pos_x + stride]};
d_twiddle_val[0][0] = grad_val[0] * input_val[0];
d_twiddle_val[0][1] = grad_val[0] * input_val[1];
d_twiddle_val[1][0] = grad_val[1] * input_val[0];
d_twiddle_val[1][1] = grad_val[1] * input_val[1];
}
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int nthreads = blockDim.x * blockDim.y;
sum_strided_atomic(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, max_stride, nthreads, tid);
if (tid_y == 0) {
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0], s_d_twiddle[tid_x]);
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1], s_d_twiddle[tid_x + max_stride]);
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0], s_d_twiddle[tid_x + 2 * max_stride]);
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1], s_d_twiddle[tid_x + 3 * max_stride]);
}
__syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read
}
if (b < batch_size) {
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
d_input_a[b][s][input_base_idx + i] = s_grad[i + threadIdx.y * max_stride * 2];
}
}
}
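// Complex counterpart of the backward kernel, still with 1 batch element per thread
// block. Complex numbers are stored as (real, imag) pairs in the last tensor
// dimension and reassembled as thrust::complex; the conjugated twiddles and inputs
// implement dL/dx = W^H dL/dy and dL/dW = dL/dy * x^H for each 2x2 factor.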
template <typename scalar_t, typename accscalar_t, bool increasing_stride>
__global__ void butterfly_multiply_untied_backward_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 5> output_a,
at::PackedTensorAccessor64<scalar_t, 6> d_twiddle_a,
at::PackedTensorAccessor64<scalar_t, 4> d_input_a,
int log_max_stride,
int log_n) {
using complex_t = thrust::complex<scalar_t>;
using acccomplex_t = thrust::complex<accscalar_t>;
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int max_stride = 1 << log_max_stride;
const int input_base_idx = blockIdx.y * blockDim.x * 2;
// __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2][2];
__shared__ scalar_t s_grad_storage[ELEMENTARY_SIZE * 2][2];
complex_t* s_grad = (complex_t *)&s_grad_storage[0]; // To avoid warning about race-condition when initializing complex_t
int b = blockIdx.x * blockDim.y + threadIdx.y;
if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock)
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
s_grad[i] = complex_t(d_input_a[b][s][input_base_idx + i][0], d_input_a[b][s][input_base_idx + i][1]);
}
int i = threadIdx.x;
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const complex_t twiddle_val[2][2] =
{{complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][1]),
complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][1])},
{complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][1]),
complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][1])}};
__syncthreads();
const complex_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
s_grad[pos] = thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1];
s_grad[pos + stride] = thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1];
const complex_t input_val[2] =
{complex_t(output_a[idx][b][s][input_base_idx + pos][0], output_a[idx][b][s][input_base_idx + pos][1]),
complex_t(output_a[idx][b][s][input_base_idx + pos + stride][0], output_a[idx][b][s][input_base_idx + pos + stride][1])};
acccomplex_t d_twiddle_val[2][2] =
{{grad_val[0] * thrust::conj(input_val[0]), grad_val[0] * thrust::conj(input_val[1])},
{grad_val[1] * thrust::conj(input_val[0]), grad_val[1] * thrust::conj(input_val[1])}};
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][0], d_twiddle_val[0][0].real());
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][1], d_twiddle_val[0][0].imag());
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][0], d_twiddle_val[0][1].real());
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][1], d_twiddle_val[0][1].imag());
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][0], d_twiddle_val[1][0].real());
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][1], d_twiddle_val[1][0].imag());
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][0], d_twiddle_val[1][1].real());
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][1], d_twiddle_val[1][1].imag());
}
__syncthreads();
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
d_input_a[b][s][input_base_idx + i][0] = s_grad[i].real();
d_input_a[b][s][input_base_idx + i][1] = s_grad[i].imag();
}
}
}
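// Single-step backward kernel, used when the stride no longer fits in a shared-memory
// block: one stride level per launch. Each thread owns one butterfly pair, walks its
// grid-strided slice of the batch accumulating the twiddle gradient in registers, and
// finishes with one atomicAdd per twiddle entry.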
template <typename scalar_t, typename accscalar_t, bool increasing_stride>
__global__ void butterfly_multiply_untied_backward_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 4> output_a,
at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a,
at::PackedTensorAccessor64<scalar_t, 3> d_input_a,
int log_stride,
int log_n) {
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a
const int n = output_a.size(3);
int stride = 1 << log_stride;
int i = blockIdx.y * blockDim.x + threadIdx.x;
  if (i > (n / 2 - 1)) return;  // Only n / 2 butterfly pairs per stride level
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const scalar_t twiddle_val[2][2] = {{twiddle_a[s][log_stride][i][0][0], twiddle_a[s][log_stride][i][0][1]},
{twiddle_a[s][log_stride][i][1][0], twiddle_a[s][log_stride][i][1][1]}};
accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}};
for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) {
const scalar_t grad_val[2] = {d_input_a[b][s][pos], d_input_a[b][s][pos + stride]};
d_input_a[b][s][pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1];
d_input_a[b][s][pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1];
const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]};
d_twiddle_val[0][0] += grad_val[0] * input_val[0];
d_twiddle_val[0][1] += grad_val[0] * input_val[1];
d_twiddle_val[1][0] += grad_val[1] * input_val[0];
d_twiddle_val[1][1] += grad_val[1] * input_val[1];
}
atomicAdd(&d_twiddle_a[s][log_stride][i][0][0], d_twiddle_val[0][0]);
atomicAdd(&d_twiddle_a[s][log_stride][i][0][1], d_twiddle_val[0][1]);
atomicAdd(&d_twiddle_a[s][log_stride][i][1][0], d_twiddle_val[1][0]);
atomicAdd(&d_twiddle_a[s][log_stride][i][1][1], d_twiddle_val[1][1]);
}
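// Complex version of the single-step backward kernel; the real and imaginary parts of
// each accumulated twiddle gradient are atomically added separately.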
template <typename scalar_t, typename accscalar_t, bool increasing_stride>
__global__ void butterfly_multiply_untied_backward_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 5> output_a,
at::PackedTensorAccessor64<scalar_t, 6> d_twiddle_a,
at::PackedTensorAccessor64<scalar_t, 4> d_input_a,
int log_stride,
int log_n) {
using complex_t = thrust::complex<scalar_t>;
using acccomplex_t = thrust::complex<accscalar_t>;
const int batch_size = output_a.size(1);
const int s = blockIdx.z;
const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a
const int n = output_a.size(3);
int stride = 1 << log_stride;
int i = blockIdx.y * blockDim.x + threadIdx.x;
  if (i > (n / 2 - 1)) return;  // Only n / 2 butterfly pairs per stride level
int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride;
int pos = 2 * (i - low_order_bits) + low_order_bits;
const complex_t twiddle_val[2][2] =
{{complex_t(twiddle_a[s][log_stride][i][0][0][0], twiddle_a[s][log_stride][i][0][0][1]),
complex_t(twiddle_a[s][log_stride][i][0][1][0], twiddle_a[s][log_stride][i][0][1][1])},
{complex_t(twiddle_a[s][log_stride][i][1][0][0], twiddle_a[s][log_stride][i][1][0][1]),
complex_t(twiddle_a[s][log_stride][i][1][1][0], twiddle_a[s][log_stride][i][1][1][1])}};
acccomplex_t d_twiddle_val[2][2] = {{{0, 0}, {0, 0}}, {{0, 0}, {0, 0}}};
for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) {
const complex_t grad_val[2] = {complex_t(d_input_a[b][s][pos][0], d_input_a[b][s][pos][1]),
complex_t(d_input_a[b][s][pos + stride][0], d_input_a[b][s][pos + stride][1])};
const complex_t d_input_val[2] =
{thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1],
thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]};
d_input_a[b][s][pos][0] = d_input_val[0].real();
d_input_a[b][s][pos][1] = d_input_val[0].imag();
d_input_a[b][s][pos + stride][0] = d_input_val[1].real();
d_input_a[b][s][pos + stride][1] = d_input_val[1].imag();
const complex_t input_val[2] =
{complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]),
complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])};
d_twiddle_val[0][0] += grad_val[0] * thrust::conj(input_val[0]);
d_twiddle_val[0][1] += grad_val[0] * thrust::conj(input_val[1]);
d_twiddle_val[1][0] += grad_val[1] * thrust::conj(input_val[0]);
d_twiddle_val[1][1] += grad_val[1] * thrust::conj(input_val[1]);
}
atomicAdd(&d_twiddle_a[s][log_stride][i][0][0][0], d_twiddle_val[0][0].real());
atomicAdd(&d_twiddle_a[s][log_stride][i][0][0][1], d_twiddle_val[0][0].imag());
atomicAdd(&d_twiddle_a[s][log_stride][i][0][1][0], d_twiddle_val[0][1].real());
atomicAdd(&d_twiddle_a[s][log_stride][i][0][1][1], d_twiddle_val[0][1].imag());
atomicAdd(&d_twiddle_a[s][log_stride][i][1][0][0], d_twiddle_val[1][0].real());
atomicAdd(&d_twiddle_a[s][log_stride][i][1][0][1], d_twiddle_val[1][0].imag());
atomicAdd(&d_twiddle_a[s][log_stride][i][1][1][0], d_twiddle_val[1][1].real());
atomicAdd(&d_twiddle_a[s][log_stride][i][1][1][1], d_twiddle_val[1][1].imag());
}
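// Host-side dispatcher for the backward pass. It mirrors the forward dispatcher but
// walks the stride levels in the reverse order of the forward pass: strides larger
// than ELEMENTARY_SIZE go through the onestep kernels one level per launch, and the
// remaining small strides are handled in a single launch of the shared-memory
// backward kernel.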
void butterfly_multiply_untied_backward_cuda(const at::Tensor& twiddle, const at::Tensor& output,
at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) {
const int batch_size = output.size(1);
const int nstack = output.size(2);
const int n = output.size(3);
const int log_n = int(log2((double) n));
const bool complex = output.dim() == 5;
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_untied_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
if (!complex) { // real
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>();
const auto output_a = output.packed_accessor64<scalar_t, 4>();
auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 5>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 3>();
if (increasing_stride) {
int log_stride = log_n - 1;
for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack);
butterfly_multiply_untied_backward_onestep_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
}
int stride = 1 << log_stride;
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack);
// dim3 block(stride);
// dim3 grid(batch_size, div_up(n / 2, stride), nstack);
butterfly_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
} else {
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack);
// dim3 block(stride);
// dim3 grid(batch_size, div_up(n / 2, stride), nstack);
butterfly_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
for (log_stride++; log_stride <= log_n - 1; ++log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack);
butterfly_multiply_untied_backward_onestep_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
}
}
} else { // complex
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 6>();
const auto output_a = output.packed_accessor64<scalar_t, 5>();
auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 6>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 4>();
if (increasing_stride) {
int log_stride = log_n - 1;
for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack);
butterfly_multiply_untied_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
}
int stride = 1 << log_stride;
dim3 block(stride);
dim3 grid(batch_size, div_up(n / 2, stride), nstack);
butterfly_multiply_untied_backward_complex_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
} else {
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride);
dim3 grid(batch_size, div_up(n / 2, stride), nstack);
butterfly_multiply_untied_backward_complex_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
for (log_stride++; log_stride <= log_n - 1; ++log_stride) {
dim3 block(MAX_BLOCK_SIZE / 2);
dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack);
butterfly_multiply_untied_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n);
}
}
}
});
  auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
             "butterfly_multiply_untied_backward_cuda failed with error: ",
             cudaGetErrorString(err));
}
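// Fused forward + backward kernel: the forward sweep is recomputed here with the
// per-step inputs stashed in registers (input_val_storage), so the backward sweep can
// run immediately without materializing intermediate outputs in global memory.
// log_max_stride is a template parameter so both loops can be unrolled, and the
// twiddle-gradient reduction over the block's batch rows is done in shared memory by
// the tid_y == 0 row rather than with sum_strided_atomic.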
template <typename scalar_t, typename accscalar_t, bool increasing_stride, int log_max_stride,
typename Function0, typename Function1, typename Function2>
__global__ void butterfly_multiply_untied_forward_backward_cuda_kernel(const CudaAcsr32<scalar_t, 5> twiddle_a,
Function0 load_input,
Function1 load_grad,
CudaAcsr32<scalar_t, 5> d_twiddle_a,
Function2 save_d_input,
int batch_size) {
const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly as well
const int max_stride = 1 << log_max_stride;
const int input_base_idx = 0;
__shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2];
// Forward pass to compute the intermediate values
scalar_t input_val_storage[MAX_N_FACTORS][2]; // Storing inputs for backward pass
load_input(s_input);
int b = blockIdx.x * blockDim.y + threadIdx.y;
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
#pragma unroll
for (int idx = 0; idx <= log_max_stride; ++idx) { // Let's not skip steps for now
int log_stride = increasing_stride ? idx : log_max_stride - idx;
int stride = 1 << log_stride;
if (tid_y == 0) {
s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0];
s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1];
s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0];
s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]},
{s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}};
if (b < batch_size) {
const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]};
input_val_storage[idx][0] = input_val[0];
input_val_storage[idx][1] = input_val[1];
s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1];
s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1];
}
__syncthreads();
// otherwise some thread might go back to writing to s_twiddle before other thread can read
    // or s_input will be overwritten with s_grad before some thread can read it
}
// Backward pass
scalar_t* s_grad = &s_input[0]; // Reusing the same storage as s_input
__shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE][2][2];
load_grad(s_grad);
#pragma unroll
for (int idx = log_max_stride; idx >= 0; --idx) {
int log_stride = increasing_stride ? idx : log_max_stride - idx;
int stride = 1 << log_stride;
    // tid_y == 0 does the atomicAdd below, so let the last row (tid_y == blockDim.y - 1) load the next twiddles instead of waiting for tid_y == 0
if (tid_y == blockDim.y - 1) {
s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0];
s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1];
s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0];
s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]},
{s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}};
if (b < batch_size) {
const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1];
s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1];
const scalar_t input_val[2] = {input_val_storage[idx][0], input_val_storage[idx][1]};
s_d_twiddle[tid_x + tid_y * max_stride][0][0] = grad_val[0] * input_val[0];
s_d_twiddle[tid_x + tid_y * max_stride][0][1] = grad_val[0] * input_val[1];
s_d_twiddle[tid_x + tid_y * max_stride][1][0] = grad_val[1] * input_val[0];
s_d_twiddle[tid_x + tid_y * max_stride][1][1] = grad_val[1] * input_val[1];
}
__syncthreads();
if (tid_y == 0) {
accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}};
for (int i = 0; i < blockDim.y; ++i) {
if (blockIdx.x * blockDim.y + i < batch_size) {
d_twiddle_val[0][0] += s_d_twiddle[tid_x + i * max_stride][0][0];
d_twiddle_val[0][1] += s_d_twiddle[tid_x + i * max_stride][0][1];
d_twiddle_val[1][0] += s_d_twiddle[tid_x + i * max_stride][1][0];
d_twiddle_val[1][1] += s_d_twiddle[tid_x + i * max_stride][1][1];
}
}
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0], d_twiddle_val[0][0]);
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1], d_twiddle_val[0][1]);
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0], d_twiddle_val[1][0]);
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1], d_twiddle_val[1][1]);
}
}
save_d_input(s_grad);
}
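// Host-side dispatcher for the fused forward + backward kernel. The whole butterfly
// is processed by a single block column (grid.y == 1, input_base_idx == 0), which
// assumes n / 2 <= ELEMENTARY_SIZE; the switch instantiates the kernel for
// log_stride = 0..9. Input, grad, and d_input are passed as device lambdas so the
// same kernel can also be driven by other loaders (e.g. the conv2d variants, per the
// blockIdx.y/z comment in the kernel).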
void butterfly_multiply_untied_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor& grad,
at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) {
int batch_size = input.size(0);
const int nstack = input.size(1);
const int n = input.size(2);
const int log_n = int(log2((double) n));
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_multiply_untied_forward_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), 1, nstack);
auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i];
}
}
};
auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i];
}
}
};
auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2];
}
}
};
switch (log_stride)
{
case 0:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 0>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 0>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 1:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 1>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 1>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 2:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 2>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 2>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 3:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 3>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 3>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 4:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 4>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 4>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 5:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 5>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 5>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 6:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 6>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 6>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 7:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 7>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 7>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 8:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 8>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 8>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 9:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 9>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 9>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
}
});
  auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
             "butterfly_multiply_untied_forward_backward_cuda failed with error: ",
             cudaGetErrorString(err));
}
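// Forward kernel for the orthogonal butterfly with tied twiddles: each 2x2 factor is
// a rotation [[cos, -sin], [sin, cos]]. Within a stride level the rotations are
// shared across butterflies with the same low-order bits, so only a cos vector and a
// sin vector are loaded (indexed by twiddle_start_idx + low_order_bits).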
template <typename scalar_t, bool increasing_stride, typename Function0, typename Function1>
__global__ void butterfly_ortho_multiply_tied_cuda_kernel(const CudaAcsr32<scalar_t, 2> twiddle_cos_a,
const CudaAcsr32<scalar_t, 2> twiddle_sin_a,
Function0 load_input,
Function1 save_output,
int log_max_stride,
int batch_size) {
const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well
const int max_stride = 1 << log_max_stride;
__shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2];
load_input(s_input);
int b = blockIdx.x * blockDim.y + threadIdx.y;
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
for (int idx = 0; idx < (log_max_stride + 1); ++idx) {
int log_stride = increasing_stride ? idx : log_max_stride - idx;
int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
if ((tid_y == 0) && (tid_x < stride)) {
s_twiddle[tid_x][0] = twiddle_cos_a[s][twiddle_start_idx + tid_x];
s_twiddle[tid_x][1] = twiddle_sin_a[s][twiddle_start_idx + tid_x];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int twiddle_idx = low_order_bits;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2] = {s_twiddle[twiddle_idx][0], s_twiddle[twiddle_idx][1]};
if (b < batch_size) {
const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]};
s_input[pos] = twiddle_val[0] * input_val[0] - twiddle_val[1] * input_val[1];
s_input[pos + stride] = twiddle_val[1] * input_val[0] + twiddle_val[0] * input_val[1];
}
__syncthreads();
// otherwise some thread might go back to writing to s_twiddle before other thread can read
}
save_output(s_input);
}
void butterfly_ortho_multiply_tied_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& input, at::Tensor& output, bool increasing_stride) {
int batch_size = input.size(0);
const int nstack = input.size(1);
const int n = input.size(2);
const int log_n = int(log2((double) n));
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_ortho_multiply_tied_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), 1, nstack);
auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i];
}
}
};
auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2];
}
}
};
increasing_stride ? butterfly_ortho_multiply_tied_cuda_kernel<scalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size)
: butterfly_ortho_multiply_tied_cuda_kernel<scalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size);
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"butterfly_ortho_multiply_tied_cuda failed with error code ",
cudaGetLastError());
}
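// Backward kernel for the tied orthogonal butterfly. Since each factor is orthogonal,
// the inputs are reconstructed on the fly from the outputs (input = R^T output)
// instead of being stored during the forward pass. The gradient w.r.t. the rotation
// angle, d_theta = (g0*x0 + g1*x1) * (-sin) + (g1*x0 - g0*x1) * cos, is reduced
// across the block with sum_strided_atomic and then atomically added per angle.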
template <typename scalar_t, typename accscalar_t, bool increasing_stride,
typename Function0, typename Function1, typename Function2>
__global__ void butterfly_ortho_multiply_tied_backward_cuda_kernel(const CudaAcsr32<scalar_t, 2> twiddle_cos_a,
const CudaAcsr32<scalar_t, 2> twiddle_sin_a,
Function0 load_output,
Function1 load_grad,
CudaAcsr32<scalar_t, 2> d_twiddle_a,
Function2 save_d_input,
int log_max_stride,
int batch_size) {
const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well
const int max_stride = 1 << log_max_stride;
__shared__ scalar_t s_output[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2];
__shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE];
int b = blockIdx.x * blockDim.y + threadIdx.y;
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
load_output(s_output);
load_grad(s_grad);
for (int idx = log_max_stride; idx >= 0; --idx) {
int log_stride = increasing_stride ? idx : log_max_stride - idx;
int stride = 1 << log_stride;
int twiddle_start_idx = stride - 1;
    // tid_y == 0 does the atomicAdd below, so let the last row (tid_y == blockDim.y - 1) load the next twiddles instead of waiting for tid_y == 0
if ((tid_y == blockDim.y - 1) && (tid_x < stride)) {
s_twiddle[tid_x][0] = twiddle_cos_a[s][twiddle_start_idx + tid_x];
s_twiddle[tid_x][1] = twiddle_sin_a[s][twiddle_start_idx + tid_x];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int twiddle_idx = low_order_bits;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2] = {s_twiddle[twiddle_idx][0], s_twiddle[twiddle_idx][1]};
      scalar_t d_twiddle_val[1] = {0}; // Length-1 array so it can be passed to sum_strided_atomic, which expects an array reference
if (b < batch_size) {
const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
s_grad[pos] = twiddle_val[0] * grad_val[0] + twiddle_val[1] * grad_val[1];
s_grad[pos + stride] = -twiddle_val[1] * grad_val[0] + twiddle_val[0] * grad_val[1];
const scalar_t output_val[2] = {s_output[pos], s_output[pos + stride]};
const scalar_t input_val[2] = {twiddle_val[0] * output_val[0] + twiddle_val[1] * output_val[1],
-twiddle_val[1] * output_val[0] + twiddle_val[0] * output_val[1]};
s_output[pos] = input_val[0];
s_output[pos + stride] = input_val[1];
d_twiddle_val[0]
= (grad_val[0] * input_val[0] + grad_val[1] * input_val[1]) * (-twiddle_val[1])
+ (-grad_val[0] * input_val[1] + grad_val[1] * input_val[0]) * twiddle_val[0];
}
int tid = tid_x + tid_y * blockDim.x;
int nthreads = blockDim.x * blockDim.y;
sum_strided_atomic(reinterpret_cast<accscalar_t (&)[1]>(d_twiddle_val), s_d_twiddle, stride, nthreads, tid);
if ((tid_y == 0) && (tid_x < stride)) {
atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx], s_d_twiddle[twiddle_idx]);
}
}
save_d_input(s_grad);
}
void butterfly_ortho_multiply_tied_backward_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& output,
const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) {
int batch_size = output.size(0);
const int nstack = output.size(1);
const int n = output.size(2);
const int log_n = int(log2((double) n));
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_ortho_multiply_tied_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
const auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>();
auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), 1, nstack);
auto load_output = [batch_size, stride, output_a] __device__ (scalar_t* s_output) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_output[i + threadIdx.y * stride * 2] = output_a[b][s][i];
}
}
};
auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i];
}
}
};
auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2];
}
}
};
increasing_stride ? butterfly_ortho_multiply_tied_backward_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size)
: butterfly_ortho_multiply_tied_backward_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size);
});
  auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
             "butterfly_ortho_multiply_tied_backward_cuda failed with error: ",
             cudaGetErrorString(err));
}
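// Untied orthogonal butterfly forward: same rotation structure as the tied kernel
// above, but every pair of every stride level has its own (cos, sin), indexed as
// twiddle_cos_a[s][log_stride][pair].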
template <typename scalar_t, bool increasing_stride, typename Function0, typename Function1>
__global__ void butterfly_ortho_multiply_untied_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a,
const CudaAcsr32<scalar_t, 3> twiddle_sin_a,
Function0 load_input,
Function1 save_output,
int log_max_stride,
int batch_size) {
const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well
const int max_stride = 1 << log_max_stride;
const int input_base_idx = 0;
__shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2];
load_input(s_input);
int b = blockIdx.x * blockDim.y + threadIdx.y;
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
for (int idx = 0; idx < (log_max_stride + 1); ++idx) {
int log_stride = increasing_stride ? idx : log_max_stride - idx;
int stride = 1 << log_stride;
if (tid_y == 0) {
s_twiddle[tid_x][0] = twiddle_cos_a[s][log_stride][input_base_idx / 2 + tid_x];
s_twiddle[tid_x][1] = twiddle_sin_a[s][log_stride][input_base_idx / 2 + tid_x];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]};
if (b < batch_size) {
const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]};
s_input[pos] = twiddle_val[0] * input_val[0] - twiddle_val[1] * input_val[1];
s_input[pos + stride] = twiddle_val[1] * input_val[0] + twiddle_val[0] * input_val[1];
}
__syncthreads();
// otherwise some thread might go back to writing to s_twiddle before other thread can read
}
save_output(s_input);
}
void butterfly_ortho_multiply_untied_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& input, at::Tensor& output, bool increasing_stride) {
int batch_size = input.size(0);
const int nstack = input.size(1);
const int n = input.size(2);
const int log_n = int(log2((double) n));
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_ortho_multiply_untied_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), 1, nstack);
auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i];
}
}
};
auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2];
}
}
};
increasing_stride ? butterfly_ortho_multiply_untied_cuda_kernel<scalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size)
: butterfly_ortho_multiply_untied_cuda_kernel<scalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size);
});
  auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
             "butterfly_ortho_multiply_untied_cuda failed with error: ",
             cudaGetErrorString(err));
}
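// Backward kernel for the untied orthogonal butterfly. As in the tied case the inputs
// are reconstructed from the outputs via the transposed rotation; here the per-angle
// gradients of the block's batch rows are summed in shared memory by the tid_y == 0
// row and then atomically added into d_twiddle_a.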
template <typename scalar_t, typename accscalar_t, bool increasing_stride,
typename Function0, typename Function1, typename Function2>
__global__ void butterfly_ortho_multiply_untied_backward_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a,
const CudaAcsr32<scalar_t, 3> twiddle_sin_a,
Function0 load_output,
Function1 load_grad,
CudaAcsr32<scalar_t, 3> d_twiddle_a,
Function2 save_d_input,
int log_max_stride,
int batch_size) {
const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well
const int max_stride = 1 << log_max_stride;
const int input_base_idx = 0;
__shared__ scalar_t s_output[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2];
__shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE];
int b = blockIdx.x * blockDim.y + threadIdx.y;
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
load_output(s_output);
load_grad(s_grad);
for (int idx = log_max_stride; idx >= 0; --idx) {
int log_stride = increasing_stride ? idx : log_max_stride - idx;
int stride = 1 << log_stride;
    // tid_y == 0 does the atomicAdd below, so let the last row (tid_y == blockDim.y - 1) load the next twiddles instead of waiting for tid_y == 0
if (tid_y == blockDim.y - 1) {
s_twiddle[tid_x][0] = twiddle_cos_a[s][log_stride][input_base_idx / 2 + tid_x];
s_twiddle[tid_x][1] = twiddle_sin_a[s][log_stride][input_base_idx / 2 + tid_x];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]};
if (b < batch_size) {
const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
s_grad[pos] = twiddle_val[0] * grad_val[0] + twiddle_val[1] * grad_val[1];
s_grad[pos + stride] = -twiddle_val[1] * grad_val[0] + twiddle_val[0] * grad_val[1];
const scalar_t output_val[2] = {s_output[pos], s_output[pos + stride]};
const scalar_t input_val[2] = {twiddle_val[0] * output_val[0] + twiddle_val[1] * output_val[1],
-twiddle_val[1] * output_val[0] + twiddle_val[0] * output_val[1]};
s_output[pos] = input_val[0];
s_output[pos + stride] = input_val[1];
s_d_twiddle[tid_x + tid_y * max_stride]
= (grad_val[0] * input_val[0] + grad_val[1] * input_val[1]) * (-twiddle_val[1])
+ (-grad_val[0] * input_val[1] + grad_val[1] * input_val[0]) * twiddle_val[0];
}
__syncthreads();
if (tid_y == 0) {
accscalar_t d_twiddle_val = 0;
for (int i = 0; i < blockDim.y; ++i) {
if (blockIdx.x * blockDim.y + i < batch_size) {
d_twiddle_val += s_d_twiddle[tid_x + i * max_stride];
}
}
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x], d_twiddle_val);
}
}
save_d_input(s_grad);
}
void butterfly_ortho_multiply_untied_backward_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& output,
const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) {
int batch_size = output.size(0);
const int nstack = output.size(1);
const int n = output.size(2);
const int log_n = int(log2((double) n));
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_ortho_multiply_untied_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), 1, nstack);
auto load_output = [batch_size, stride, output_a] __device__ (scalar_t* s_output) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_output[i + threadIdx.y * stride * 2] = output_a[b][s][i];
}
}
};
auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i];
}
}
};
auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2];
}
}
};
increasing_stride ? butterfly_ortho_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size)
: butterfly_ortho_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size);
});
  auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
             "butterfly_ortho_multiply_untied_backward_cuda failed with error: ",
             cudaGetErrorString(err));
}
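// Forward kernel for the bbt product: nblocks repetitions of a decreasing-stride
// butterfly sweep followed by an increasing-stride sweep (2 * (log_max_stride + 1)
// twiddle steps per repetition), all applied in shared memory in a single launch.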
template <typename scalar_t, typename Function0, typename Function1>
__global__ void bbt_multiply_untied_cuda_kernel(const CudaAcsr32<scalar_t, 5> twiddle_a,
Function0 load_input,
Function1 save_output,
int log_max_stride,
int batch_size,
int nblocks) {
const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt as well
const int max_stride = 1 << log_max_stride;
const int input_base_idx = 0;
__shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2];
load_input(s_input);
int b = blockIdx.x * blockDim.y + threadIdx.y;
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
for (int block = 0; block < nblocks; ++block) {
for (int idx = 0; idx < 2 * (log_max_stride + 1); ++idx) {
int log_stride = idx <= log_max_stride ? log_max_stride - idx : idx - log_max_stride - 1;
int stride = 1 << log_stride;
if (tid_y == 0) {
s_twiddle[tid_x][0][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0];
s_twiddle[tid_x][0][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1];
s_twiddle[tid_x][1][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0];
s_twiddle[tid_x][1][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]},
{s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}};
if (b < batch_size) {
const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]};
s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1];
s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1];
}
__syncthreads();
// otherwise some thread might go back to writing to s_twiddle before other thread can read
}
}
save_output(s_input);
}
void bbt_multiply_untied_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output) {
int batch_size = input.size(0);
const int nstack = input.size(1);
const int n = input.size(2);
const int log_n = int(log2((double) n));
int nblocks = twiddle.size(1) / (2 * log_n);
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "bbt_multiply_untied_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), 1, nstack);
auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i];
}
}
};
auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2];
}
}
};
bbt_multiply_untied_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, save_output, log_stride, batch_size, nblocks);
});
  auto err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess,
             "bbt_multiply_untied_cuda failed with error: ",
             cudaGetErrorString(err));
}
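// Fused forward + backward kernel for the bbt product, analogous to
// butterfly_multiply_untied_forward_backward_cuda_kernel above: the forward sweep
// stashes every per-step input pair in registers (nblocks * 2 * (log_max_stride + 1)
// entries), and the backward sweep then walks the blocks and steps in reverse,
// collecting twiddle gradients in shared memory per batch row.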
template <typename scalar_t, typename accscalar_t, int nblocks,
typename Function0, typename Function1, typename Function2>
__global__ void bbt_multiply_untied_forward_backward_cuda_kernel(const CudaAcsr32<scalar_t, 5> twiddle_a,
Function0 load_input,
Function1 load_grad,
CudaAcsr32<scalar_t, 5> d_twiddle_a,
Function2 save_d_input,
int log_max_stride,
int batch_size) {
const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt as well
const int max_stride = 1 << log_max_stride;
const int input_base_idx = 0;
__shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2];
// Forward pass to compute the intermediate values
scalar_t input_val_storage[nblocks * 2 * MAX_N_FACTORS][2]; // Storing inputs for backward pass
load_input(s_input);
int b = blockIdx.x * blockDim.y + threadIdx.y;
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
for (int block = 0; block < nblocks; ++block) {
for (int idx = 0; idx < 2 * (log_max_stride + 1); ++idx) { // Let's not skip steps for now
int log_stride = idx <= log_max_stride ? log_max_stride - idx : idx - log_max_stride - 1;
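// Strides sweep max_stride -> 1 for the first log_max_stride + 1 steps and 1 -> max_stride
// for the remaining steps: the two butterfly factors of each B*B^T block.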
int stride = 1 << log_stride;
if (tid_y == 0) {
s_twiddle[tid_x][0][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0];
s_twiddle[tid_x][0][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1];
s_twiddle[tid_x][1][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0];
s_twiddle[tid_x][1][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]},
{s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}};
if (b < batch_size) {
const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]};
input_val_storage[idx + block * 2 * (log_max_stride + 1)][0] = input_val[0];
input_val_storage[idx + block * 2 * (log_max_stride + 1)][1] = input_val[1];
s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1];
s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1];
}
__syncthreads();
// otherwise some thread might start writing to s_twiddle again before another thread has read it,
// or s_input would be overwritten with s_grad before some thread has read it
}
}
// Backward pass
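// Each step propagates the gradient through the transpose of its 2x2 twiddle and accumulates
// the outer product grad * input^T into s_d_twiddle; one row of threads then sums over the
// batch rows handled by this block and commits the result with atomicAdd.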
scalar_t* s_grad = &s_input[0]; // Reusing the same storage as s_input
__shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE][2][2];
load_grad(s_grad);
for (int block = nblocks - 1; block >= 0; --block) {
for (int idx = 2 * (log_max_stride + 1) - 1; idx >= 0; --idx) {
int log_stride = idx <= log_max_stride ? log_max_stride - idx : idx - log_max_stride - 1;
int stride = 1 << log_stride;
// tid_y == 0 does the writing (atomicAdd), so let tid_y == blockDim.y - 1 do the reading instead of having to wait for tid_y == 0
if (tid_y == blockDim.y - 1) {
s_twiddle[tid_x][0][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0];
s_twiddle[tid_x][0][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1];
s_twiddle[tid_x][1][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0];
s_twiddle[tid_x][1][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]},
{s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}};
if (b < batch_size) {
const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1];
s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1];
const scalar_t input_val[2] = {input_val_storage[idx + block * 2 * (log_max_stride + 1)][0], input_val_storage[idx + block * 2 * (log_max_stride + 1)][1]};
s_d_twiddle[tid_x + tid_y * max_stride][0][0] = grad_val[0] * input_val[0];
s_d_twiddle[tid_x + tid_y * max_stride][0][1] = grad_val[0] * input_val[1];
s_d_twiddle[tid_x + tid_y * max_stride][1][0] = grad_val[1] * input_val[0];
s_d_twiddle[tid_x + tid_y * max_stride][1][1] = grad_val[1] * input_val[1];
}
__syncthreads();
if (tid_y == 0) {
accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}};
for (int i = 0; i < blockDim.y; ++i) {
if (blockIdx.x * blockDim.y + i < batch_size) {
d_twiddle_val[0][0] += s_d_twiddle[tid_x + i * max_stride][0][0];
d_twiddle_val[0][1] += s_d_twiddle[tid_x + i * max_stride][0][1];
d_twiddle_val[1][0] += s_d_twiddle[tid_x + i * max_stride][1][0];
d_twiddle_val[1][1] += s_d_twiddle[tid_x + i * max_stride][1][1];
}
}
atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0], d_twiddle_val[0][0]);
atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1], d_twiddle_val[0][1]);
atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0], d_twiddle_val[1][0]);
atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1], d_twiddle_val[1][1]);
}
}
}
save_d_input(s_grad);
}
void bbt_multiply_untied_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor& grad,
at::Tensor& d_twiddle, at::Tensor& d_input) {
int batch_size = input.size(0);
const int nstack = input.size(1);
const int n = input.size(2);
const int log_n = int(log2((double) n));
int nblocks = twiddle.size(1) / (2 * log_n);
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "bbt_multiply_untied_forward_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), 1, nstack);
auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i];
}
}
};
auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i];
}
}
};
auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2];
}
}
};
switch (nblocks)
{
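// nblocks is a template parameter so that the per-thread input_val_storage array has a
// compile-time bound; values of nblocks outside 1..14 fall through without launching a kernel.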
case 1:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 1>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 2:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 2>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 3:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 3>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 4:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 4>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 5:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 5>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 6:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 6>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 7:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 7>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 8:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 8>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 9:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 9>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 10:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 10>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 11:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 11>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 12:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 12>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 13:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 13>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 14:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 14>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
}
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"bbt_multiply_untied_forward_backward_cuda failed with error code ",
cudaGetLastError());
}
template <typename scalar_t, typename Function0, typename Function1>
__global__ void bbt_ortho_multiply_untied_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a,
const CudaAcsr32<scalar_t, 3> twiddle_sin_a,
Function0 load_input,
Function1 save_output,
int log_max_stride,
int batch_size,
int nblocks) {
const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt_ortho as well
const int max_stride = 1 << log_max_stride;
const int input_base_idx = 0;
__shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2];
load_input(s_input);
int b = blockIdx.x * blockDim.y + threadIdx.y;
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
for (int block = 0; block < nblocks; ++block) {
for (int idx = 0; idx < 2 * (log_max_stride + 1); ++idx) {
int log_stride = idx <= log_max_stride ? log_max_stride - idx : idx - log_max_stride - 1;
int stride = 1 << log_stride;
if (tid_y == 0) {
s_twiddle[tid_x][0] = twiddle_cos_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x];
s_twiddle[tid_x][1] = twiddle_sin_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]};
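// twiddle_val = (cos, sin): each pair (pos, pos + stride) is rotated by the Givens rotation [[c, -s], [s, c]].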
if (b < batch_size) {
const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]};
s_input[pos] = twiddle_val[0] * input_val[0] - twiddle_val[1] * input_val[1];
s_input[pos + stride] = twiddle_val[1] * input_val[0] + twiddle_val[0] * input_val[1];
}
__syncthreads();
// otherwise some thread might start writing to s_twiddle again before another thread has read it
}
}
save_output(s_input);
}
void bbt_ortho_multiply_untied_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin,
const at::Tensor& input, at::Tensor& output) {
int batch_size = input.size(0);
const int nstack = input.size(1);
const int n = input.size(2);
const int log_n = int(log2((double) n));
int nblocks = twiddle_cos.size(1) / (2 * log_n);
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "bbt_ortho_multiply_untied_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), 1, nstack);
auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i];
}
}
};
auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2];
}
}
};
bbt_ortho_multiply_untied_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input,
save_output, log_stride, batch_size, nblocks);
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"bbt_ortho_multiply_untied_cuda failed with error code ",
cudaGetLastError());
}
template <typename scalar_t, typename accscalar_t,
typename Function0, typename Function1, typename Function2>
__global__ void bbt_ortho_multiply_untied_backward_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a,
const CudaAcsr32<scalar_t, 3> twiddle_sin_a,
Function0 load_output,
Function1 load_grad,
CudaAcsr32<scalar_t, 3> d_twiddle_a,
Function2 save_d_input,
int log_max_stride,
int batch_size,
int nblocks) {
const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt_ortho as well
const int max_stride = 1 << log_max_stride;
const int input_base_idx = 0;
__shared__ scalar_t s_output[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2];
__shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE];
int b = blockIdx.x * blockDim.y + threadIdx.y;
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
load_output(s_output);
load_grad(s_grad);
for (int block = nblocks - 1; block >= 0; --block) {
for (int idx = 2 * (log_max_stride + 1) - 1; idx >= 0; --idx) {
int log_stride = idx <= log_max_stride ? log_max_stride - idx : idx - log_max_stride - 1;
int stride = 1 << log_stride;
// tid_y == 0 does the writing (atomicAdd), so let tid_y == blockDim.y - 1 do the reading instead of having to wait for tid_y == 0
if (tid_y == blockDim.y - 1) {
s_twiddle[tid_x][0] = twiddle_cos_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x];
s_twiddle[tid_x][1] = twiddle_sin_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]};
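// The rotation is orthogonal, so the gradient is propagated with R^T and the layer's input is
// reconstructed from the stored output with R^{-1} = R^T, avoiding storage of intermediate activations.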
if (b < batch_size) {
const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
s_grad[pos] = twiddle_val[0] * grad_val[0] + twiddle_val[1] * grad_val[1];
s_grad[pos + stride] = -twiddle_val[1] * grad_val[0] + twiddle_val[0] * grad_val[1];
const scalar_t output_val[2] = {s_output[pos], s_output[pos + stride]};
const scalar_t input_val[2] = {twiddle_val[0] * output_val[0] + twiddle_val[1] * output_val[1],
-twiddle_val[1] * output_val[0] + twiddle_val[0] * output_val[1]};
s_output[pos] = input_val[0];
s_output[pos + stride] = input_val[1];
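// dL/dtheta = grad^T (dR/dtheta) input with dR/dtheta = [[-s, -c], [c, -s]], rearranged below.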
s_d_twiddle[tid_x + tid_y * max_stride]
= (grad_val[0] * input_val[0] + grad_val[1] * input_val[1]) * (-twiddle_val[1])
+ (-grad_val[0] * input_val[1] + grad_val[1] * input_val[0]) * twiddle_val[0];
}
__syncthreads();
if (tid_y == 0) {
accscalar_t d_twiddle_val = 0;
for (int i = 0; i < blockDim.y; ++i) {
if (blockIdx.x * blockDim.y + i < batch_size) {
d_twiddle_val += s_d_twiddle[tid_x + i * max_stride];
}
}
atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x], d_twiddle_val);
}
}
}
save_d_input(s_grad);
}
void bbt_ortho_multiply_untied_backward_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& output,
const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input) {
int batch_size = output.size(0);
const int nstack = output.size(1);
const int n = output.size(2);
const int log_n = int(log2((double) n));
int nblocks = twiddle_cos.size(1) / (2 * log_n);
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "bbt_ortho_multiply_untied_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), 1, nstack);
auto load_output = [batch_size, stride, output_a] __device__ (scalar_t* s_output) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_output[i + threadIdx.y * stride * 2] = output_a[b][s][i];
}
}
};
auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i];
}
}
};
auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int s = blockIdx.z;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2];
}
}
};
bbt_ortho_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size, nblocks);
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"bbt_ortho_multiply_untied_backward_cuda failed with error code ",
cudaGetLastError());
}
template <typename scalar_t, bool increasing_stride, bool return_intermediates>
__global__ void butterfly_conv2d_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 4> input_a,
at::PackedTensorAccessor64<scalar_t, 4> output_a,
int log_max_stride,
int log_n,
int kernel_size,
int padding,
int h_out,
int w_out) {
const int batch_size = output_a.size(1);
const int stack = blockIdx.z;
const int s = blockIdx.y + gridDim.y * stack;
const int max_stride = 1 << log_max_stride;
// base index always 0
const int input_base_idx = 0;
const int h_in = input_a.size(2);
const int w_in = input_a.size(3);
__shared__ scalar_t s_input[ELEMENTARY_SIZE * 2];
__shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2];
int b = blockIdx.x * blockDim.y + threadIdx.y;
const int patch_idx = b % (h_out * w_out);
const int batch_idx = b / (h_out * w_out);
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
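// Implicit im2col: each logical batch element b is one output location of one image;
// batch_idx selects the image, patch_idx the (p_i, p_j) output position, and the blockIdx.z
// stack selects the (k_i, k_j) tap of the kernel window.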
if (b < batch_size) {
for (int t = threadIdx.x; t < max_stride * 2; t += blockDim.x) {
// get index into patch
int k_i = stack / kernel_size;
int k_j = stack % kernel_size;
// get patch index into full matrix
int p_i = (patch_idx) / w_out;
int p_j = (patch_idx) % (w_out);
// combine indices and adjust for padding
int i = k_i + p_i - padding;
int j = k_j + p_j - padding;
if (i >= w_in || j >= h_in || i < 0 || j < 0) {
s_input[t + threadIdx.y * max_stride * 2] = 0;
} else {
s_input[t + threadIdx.y * max_stride * 2] = input_a[batch_idx][input_base_idx + t][i][j];
// also store the input in slot 0 of output for the backward pass;
// this memory is already allocated, so the extra write costs little
output_a[0][b][s][input_base_idx + t] = s_input[t + threadIdx.y * max_stride * 2];
}
}
}
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
if (tid_y == 0) {
s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0];
s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1];
s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0];
s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]},
{s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}};
__syncthreads(); // otherwise some thread might start writing to s_twiddle again before another thread has read it
if (b < batch_size) {
const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]};
s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1];
s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1];
if (return_intermediates || idx == first_idx + log_max_stride) {
output_a[idx+1][b][s][input_base_idx + pos_x] = s_input[pos];
output_a[idx+1][b][s][input_base_idx + pos_x + stride] = s_input[pos + stride];
}
}
}
}
void butterfly_conv2d_cuda(const at::Tensor& twiddle,
const at::Tensor& input, at::Tensor& output,
const int kernel_size, const int padding,
const int h_out, const int w_out, bool increasing_stride,
bool return_intermediates)
{
const int b_in = input.size(0);
const int n = input.size(1); /*c*/
const int nstack = twiddle.size(0);
const int stack = kernel_size*kernel_size;
const int log_n = int(log2((double) n));
const int batch_size = output.size(1);
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(),
"butterfly_conv2d_cuda", [&] {
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>();
// batch_size, c, h, w
const auto input_a = input.packed_accessor64<scalar_t, 4>();
// (log c_in + 1), h*w*batch_size, nstack, c_in
auto output_a = output.packed_accessor64<scalar_t, 4>();
// assume in_channels <= 1024
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
// to support out_channels > in_channels
int c_out_ratio = nstack / stack;
// dim3 block(stride);
// dim3 grid(batch_size, c_out_ratio, stack);
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack);
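// grid.x covers the b_in * h_out * w_out patches, grid.y the out/in channel ratio, and grid.z
// the kernel_size^2 taps; the kernel folds blockIdx.y and blockIdx.z into the stack index s.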
if (increasing_stride) {
return_intermediates ? butterfly_conv2d_cuda_kernel<scalar_t, true, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a,
output_a, log_stride, log_n, kernel_size, padding, h_out, w_out)
: butterfly_conv2d_cuda_kernel<scalar_t, true, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a,
output_a, log_stride, log_n, kernel_size, padding, h_out, w_out);
}
else {
return_intermediates ? butterfly_conv2d_cuda_kernel<scalar_t, false, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a,
output_a, log_stride, log_n, kernel_size, padding, h_out, w_out)
: butterfly_conv2d_cuda_kernel<scalar_t, false, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a,
output_a, log_stride, log_n, kernel_size, padding, h_out, w_out);
}
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"butterfly_conv2d_cuda failed with error code ",
cudaGetLastError());
}
template <typename scalar_t, typename accscalar_t, bool increasing_stride>
__global__ void butterfly_conv2d_backward_cuda_kernel(
const at::PackedTensorAccessor64<scalar_t, 3> grad_a,
const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a,
const at::PackedTensorAccessor64<scalar_t, 4> output_a,
at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a,
at::PackedTensorAccessor64<scalar_t, 4> d_input_a,
int log_max_stride,
int log_n,
int kernel_size,
int padding,
int h_out,
int w_out) {
const int batch_size = output_a.size(1);
const int stack = blockIdx.z;
const int s = blockIdx.y + gridDim.y * stack;
// base index always 0
const int input_base_idx = 0;
const int h_in = d_input_a.size(2);
const int w_in = d_input_a.size(3);
const int max_stride = 1 << log_max_stride;
__shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2];
__shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle
accscalar_t* s_d_twiddle = (accscalar_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle; be careful if the implementation changes.
int b = blockIdx.x * blockDim.y + threadIdx.y;
if (b < batch_size) {
for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) {
s_grad[i + threadIdx.y * max_stride * 2] = grad_a[b][s][input_base_idx + i];
}
}
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride;
for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) {
int log_stride = increasing_stride ? idx : log_n - 1 - idx;
int stride = 1 << log_stride;
if (tid_y == 0) {
s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0];
s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1];
s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0];
s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1];
}
int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride;
int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits;
int pos_y = tid_y * max_stride * 2;
int pos = pos_x + pos_y;
__syncthreads();
const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]},
{s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}};
// Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then
accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}};
if (b < batch_size) {
const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]};
s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1];
s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1];
const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos_x],
output_a[idx][b][s][input_base_idx + pos_x + stride]};
d_twiddle_val[0][0] = grad_val[0] * input_val[0];
d_twiddle_val[0][1] = grad_val[0] * input_val[1];
d_twiddle_val[1][0] = grad_val[1] * input_val[0];
d_twiddle_val[1][1] = grad_val[1] * input_val[1];
}
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int nthreads = blockDim.x * blockDim.y;
sum_strided_atomic(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, max_stride, nthreads, tid);
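// After the reduction, s_d_twiddle holds block-wide sums laid out as four segments of length
// max_stride ([0][0], [0][1], [1][0], [1][1]); one row of threads commits them with atomicAdd.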
if (tid_y == 0) {
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0], s_d_twiddle[tid_x]);
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1], s_d_twiddle[tid_x + max_stride]);
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0], s_d_twiddle[tid_x + 2 * max_stride]);
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1], s_d_twiddle[tid_x + 3 * max_stride]);
}
__syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read
}
if (b < batch_size) {
const int patch_idx = b % (h_out * w_out);
const int batch_idx = b / (h_out * w_out);
for (int t = threadIdx.x; t < max_stride * 2; t += blockDim.x) {
// map back to b, c, h, w
// get index into patch
int k_i = stack / kernel_size;
int k_j = stack % kernel_size;
// get patch index into full matrix
int p_i = (patch_idx) / w_out;
int p_j = (patch_idx) % (w_out);
// combine indices and adjust for padding
int i = k_i + p_i - padding;
int j = k_j + p_j - padding;
// this needs to be atomic because the input is reused in the forward pass
// (when out_channels > in_channels and across the kernel_size^2 patch entries), so several threads may accumulate into the same d_input element
if (i < w_in && j < h_in && i >= 0 && j >= 0) {
atomicAdd(&d_input_a[batch_idx][input_base_idx + t][i][j], s_grad[t + threadIdx.y * max_stride * 2]);
}
}
}
}
void butterfly_conv2d_backward_cuda(const at::Tensor&grad, const at::Tensor& twiddle,
const at::Tensor& output, at::Tensor& d_twiddle, at::Tensor& d_input,
const int kernel_size, const int padding,
const int h_out, const int w_out,
bool increasing_stride) {
const int batch_size = output.size(1);
const int nstack = twiddle.size(0);
const int stack = kernel_size*kernel_size;
const int n = d_input.size(1); // c_in
const int log_n = int(log2((double) n));
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(),
"butterfly_conv2d_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto grad_a = grad.packed_accessor64<scalar_t, 3>();
const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>();
const auto output_a = output.packed_accessor64<scalar_t, 4>();
auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 5>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 4>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
// to support out_channels > in_channels
int c_out_ratio = nstack / stack;
// dim3 block(stride);
// dim3 grid(batch_size, c_out_ratio, stack);
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack);
increasing_stride ?
butterfly_conv2d_backward_cuda_kernel<scalar_t, accscalar_t, true>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_a, twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride,
log_n, kernel_size, padding, h_out, w_out) :
butterfly_conv2d_backward_cuda_kernel<scalar_t, accscalar_t, false>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_a, twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride,
log_n, kernel_size, padding, h_out, w_out);
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"butterfly_conv2d_backward_cuda failed with error code ",
cudaGetLastError());
}
void butterfly_conv2d_forward_backward_cuda(const at::Tensor& twiddle,
const at::Tensor& input, const at::Tensor&grad,
at::Tensor& d_twiddle, at::Tensor& d_input,
const int kernel_size, const int padding, const int h_out, const int w_out,
bool increasing_stride) {
const int batch_size = grad.size(0); // b_out = b_in * h_out * w_out
const int nstack = twiddle.size(0);
const int stack = kernel_size * kernel_size;
const int n = d_input.size(1); // c_in
const int log_n = int(log2((double) n));
const int c_out_ratio = nstack / stack;
const int h_in = input.size(2);
const int w_in = input.size(3);
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(),
"butterfly_conv2d_forward_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
const auto input_a = input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>();
const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
auto d_input_a = d_input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack);
auto load_input = [batch_size, stride, input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_input) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int stack = blockIdx.z;
const int patch_idx = b % (h_out * w_out);
const int batch_idx = b / (h_out * w_out);
if (b < batch_size) {
for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) {
// get index into patch
int k_i = stack / kernel_size;
int k_j = stack % kernel_size;
// get patch index into full matrix
int p_i = (patch_idx) / w_out;
int p_j = (patch_idx) % (w_out);
// combine indices and adjust for padding
int i = k_i + p_i - padding;
int j = k_j + p_j - padding;
if (i >= w_in || j >= h_in || i < 0 || j < 0) {
s_input[t + threadIdx.y * stride * 2] = 0;
} else {
s_input[t + threadIdx.y * stride * 2] = input_a[batch_idx][t][i][j];
}
}
}
};
auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int stack = blockIdx.z;
const int s = blockIdx.y + gridDim.y * stack;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i];
}
}
};
auto save_d_input = [batch_size, stride, d_input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_grad) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int stack = blockIdx.z;
const int patch_idx = b % (h_out * w_out);
const int batch_idx = b / (h_out * w_out);
if (b < batch_size) {
for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) {
// map back to b, c, h, w
// get index into patch
int k_i = stack / kernel_size;
int k_j = stack % kernel_size;
// get patch index into full matrix
int p_i = (patch_idx) / w_out;
int p_j = (patch_idx) % (w_out);
// combine indices and adjust for padding
int i = k_i + p_i - padding;
int j = k_j + p_j - padding;
if (i < w_in && j < h_in && i >= 0 && j >= 0) {
atomicAdd(&d_input_a[batch_idx][t][i][j], s_grad[t + threadIdx.y * stride * 2]);
}
}
}
};
switch (log_stride)
{
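// log_stride is a template parameter of the shared butterfly forward/backward kernel;
// only log_stride values 0..9 are handled here, anything larger falls through without launching.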
case 0:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 0>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 0>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 1:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 1>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 1>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 2:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 2>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 2>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 3:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 3>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 3>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 4:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 4>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 4>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 5:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 5>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 5>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 6:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 6>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 6>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 7:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 7>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 7>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 8:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 8>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 8>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
case 9:
increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 9>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size)
: butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 9>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break;
}
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"butterfly_conv2d_forward_backward_cuda failed with error code ",
cudaGetLastError());
}
void bbt_conv2d_cuda(const at::Tensor& twiddle,
const at::Tensor& input, at::Tensor& output,
const int kernel_size, const int padding,
const int h_out, const int w_out)
{
const int b_in = input.size(0);
const int n = input.size(1); /*c*/
const int nstack = twiddle.size(0);
const int stack = kernel_size*kernel_size;
const int log_n = int(log2((double) n));
int nblocks = twiddle.size(1) / (2 * log_n);
int batch_size = output.size(0);
const int h_in = input.size(2);
const int w_in = input.size(3);
AT_DISPATCH_FLOATING_TYPES(output.scalar_type(),
"bbt_conv2d_cuda", [&] {
const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
// batch_size, c, h, w
const auto input_a = input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>();
// h*w*batch_size, nstack, c_in
auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
// assume in_channels <= 1024
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
// to support out_channels > in_channels
int c_out_ratio = nstack / stack;
// dim3 block(stride);
// dim3 grid(batch_size, c_out_ratio, stack);
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack);
auto load_input = [batch_size, stride, input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_input) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int stack = blockIdx.z;
const int patch_idx = b % (h_out * w_out);
const int batch_idx = b / (h_out * w_out);
if (b < batch_size) {
for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) {
// get index into patch
int k_i = stack / kernel_size;
int k_j = stack % kernel_size;
// get patch index into full matrix
int p_i = (patch_idx) / w_out;
int p_j = (patch_idx) % (w_out);
// combine indices and adjust for padding
int i = k_i + p_i - padding;
int j = k_j + p_j - padding;
if (i >= w_in || j >= h_in || i < 0 || j < 0) {
s_input[t + threadIdx.y * stride * 2] = 0;
} else {
s_input[t + threadIdx.y * stride * 2] = input_a[batch_idx][t][i][j];
}
}
}
};
auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int stack = blockIdx.z;
const int s = blockIdx.y + gridDim.y * stack;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2];
}
}
};
bbt_multiply_untied_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, save_output, log_stride, batch_size, nblocks);
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"bbt_conv2d_cuda failed with error code ",
cudaGetLastError());
}
void bbt_conv2d_forward_backward_cuda(const at::Tensor& twiddle,
const at::Tensor& input, const at::Tensor&grad,
at::Tensor& d_twiddle, at::Tensor& d_input,
const int kernel_size, const int padding, const int h_out, const int w_out) {
int batch_size = grad.size(0); // b_out = b_in * h_out * w_out
const int nstack = twiddle.size(0);
const int stack = kernel_size * kernel_size;
const int n = d_input.size(1); // c_in
const int log_n = int(log2((double) n));
int nblocks = twiddle.size(1) / (2 * log_n);
const int c_out_ratio = nstack / stack;
const int h_in = input.size(2);
const int w_in = input.size(3);
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(),
"bbt_conv2d_forward_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
const auto input_a = input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>();
const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>();
auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>();
auto d_input_a = d_input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>();
int stride = std::min<int>(ELEMENTARY_SIZE, n / 2);
int log_stride = int(log2((double) stride));
dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2));
dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack);
auto load_input = [batch_size, stride, input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_input) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int stack = blockIdx.z;
const int patch_idx = b % (h_out * w_out);
const int batch_idx = b / (h_out * w_out);
if (b < batch_size) {
for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) {
// get index into patch
int k_i = stack / kernel_size;
int k_j = stack % kernel_size;
// get patch index into full matrix
int p_i = (patch_idx) / w_out;
int p_j = (patch_idx) % (w_out);
// combine indices and adjust for padding
int i = k_i + p_i - padding;
int j = k_j + p_j - padding;
if (i >= w_in || j >= h_in || i < 0 || j < 0) {
s_input[t + threadIdx.y * stride * 2] = 0;
} else {
s_input[t + threadIdx.y * stride * 2] = input_a[batch_idx][t][i][j];
}
}
}
};
auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int stack = blockIdx.z;
const int s = blockIdx.y + gridDim.y * stack;
if (b < batch_size) {
for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) {
s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i];
}
}
};
auto save_d_input = [batch_size, stride, d_input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_grad) mutable {
const int b = blockIdx.x * blockDim.y + threadIdx.y;
const int stack = blockIdx.z;
const int patch_idx = b % (h_out * w_out);
const int batch_idx = b / (h_out * w_out);
if (b < batch_size) {
for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) {
// map back to b, c, h, w
// get index into patch
int k_i = stack / kernel_size;
int k_j = stack % kernel_size;
// get patch index into full matrix
int p_i = (patch_idx) / w_out;
int p_j = (patch_idx) % (w_out);
// combine indices and adjust for padding
int i = k_i + p_i - padding;
int j = k_j + p_j - padding;
if (i < w_in && j < h_in && i >= 0 && j >= 0) {
atomicAdd(&d_input_a[batch_idx][t][i][j], s_grad[t + threadIdx.y * stride * 2]);
}
}
}
};
switch (nblocks)
{
case 1:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 1>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 2:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 2>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 3:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 3>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 4:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 4>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 5:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 5>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 6:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 6>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 7:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 7>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 8:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 8>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 9:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 9>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 10:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 10>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 11:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 11>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 12:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 12>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 13:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 13>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
case 14:
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 14>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break;
}
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"bbt_conv2d_forward_backward_cuda failed with error code ",
cudaGetLastError());
}
template <typename scalar_t>
__global__ void permutation_factor_even_odd_multiply_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a,
const at::PackedTensorAccessor64<scalar_t, 3> input_a,
const at::PackedTensorAccessor64<scalar_t, 3> permuted_input_a,
at::PackedTensorAccessor64<scalar_t, 3> output_a) {
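// Soft even-odd permutation: output = (1 - p) * input + p * P(input), where P regroups the
// vector into its even- and odd-indexed halves and p is a scalar mixing weight.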
const auto p = p_a[0];
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2); // already divided by 2
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
#pragma unroll
for (int j = 0; j <= 1; ++j) {
output_a[b][j][i] = (1 - p) * input_a[b][j][i] + p * permuted_input_a[b][j][i];
}
}
}
}
template <typename scalar_t>
__global__ void permutation_factor_even_odd_multiply_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a,
const at::PackedTensorAccessor64<scalar_t, 4> input_a,
const at::PackedTensorAccessor64<scalar_t, 4> permuted_input_a,
at::PackedTensorAccessor64<scalar_t, 4> output_a) {
const auto p = p_a[0];
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2);
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
#pragma unroll
for (int j = 0; j <= 1; ++j) {
#pragma unroll
for (int k = 0; k <= 1; ++k) {
output_a[b][j][i][k] = (1 - p) * input_a[b][j][i][k] + p * permuted_input_a[b][j][i][k];
}
}
}
}
}
void permutation_factor_even_odd_multiply_cuda(const at::Tensor& p, const at::Tensor& input, at::Tensor& output) {
const auto batch_size = input.size(0);
const auto n = input.size(1);
dim3 block;
block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2);
block.y = div_up(MAX_BLOCK_SIZE, block.x);
dim3 grid(div_up(n / 2, block.x), div_up(batch_size, block.y * WORK_PER_THREAD));
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_even_odd_multiply", [&] {
const auto p_a = p.packed_accessor64<scalar_t, 1>();
switch (input.dim()) {
case 2: // real
{
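// Two views of the same flat vector: input_folded splits it into its two halves, while
// permuted_input (a transposed reshape) groups the even- and odd-indexed entries, so the
// kernel can blend the identity and the even-odd permutation without a gather.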
const auto permuted_input = input.reshape({batch_size, n / 2, 2}).transpose(1, 2);
const auto input_folded = input.reshape({batch_size, 2, n / 2});
output = output.view({batch_size, 2, n / 2});
const auto input_a = input_folded.packed_accessor64<scalar_t, 3>();
const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 3>();
auto output_a = output.packed_accessor64<scalar_t, 3>();
permutation_factor_even_odd_multiply_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(p_a, input_a, permuted_input_a, output_a);
output = output.view({batch_size, n});
break;
}
case 3: // complex
{
const auto permuted_input = input.reshape({batch_size, n / 2, 2, 2}).transpose(1, 2);
const auto input_folded = input.reshape({batch_size, 2, n / 2, 2});
output = output.view({batch_size, 2, n / 2, 2});
const auto input_a = input_folded.packed_accessor64<scalar_t, 4>();
const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 4>();
auto output_a = output.packed_accessor64<scalar_t, 4>();
permutation_factor_even_odd_multiply_complex_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(p_a, input_a, permuted_input_a, output_a);
output = output.view({batch_size, n, 2});
break;
}
default:
AT_ERROR("permutation_factor_even_odd_multiply requires input dimension 2 or 3");
}
});
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"permutation_factor_even_odd_multiply_cuda failed with error code ",
cudaGetLastError());
}
template <typename scalar_t>
__global__ void permutation_factor_even_odd_multiply_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> grad_a,
const at::PackedTensorAccessor64<scalar_t, 3> grad_reshaped_a,
const at::PackedTensorAccessor64<scalar_t, 3> permuted_grad_a,
const at::PackedTensorAccessor64<scalar_t, 1> p_a,
const at::PackedTensorAccessor64<scalar_t, 3> input_a,
const at::PackedTensorAccessor64<scalar_t, 3> permuted_input_a,
at::PackedTensorAccessor64<scalar_t, 2> d_p_expanded_a,
at::PackedTensorAccessor64<scalar_t, 3> d_input_a) {
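// d_p_expanded holds the per-element contributions to dL/dp, (P(x) - x) . grad, summed outside
// this kernel; d_input = (1 - p) * grad + p * P^T(grad), with P^T realized by the permuted_grad view.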
const scalar_t p = p_a[0];
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2);
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
d_p_expanded_a[b][i] = (permuted_input_a[b][0][i] - input_a[b][0][i]) * grad_reshaped_a[b][0][i]
+ (permuted_input_a[b][1][i] - input_a[b][1][i]) * grad_reshaped_a[b][1][i];
d_input_a[b][i][0] = (1 - p) * grad_a[b][i][0] + p * permuted_grad_a[b][i][0];
d_input_a[b][i][1] = (1 - p) * grad_a[b][i][1] + p * permuted_grad_a[b][i][1];
}
}
}
template <typename scalar_t>
__global__ void permutation_factor_even_odd_multiply_complex_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> grad_a,
const at::PackedTensorAccessor64<scalar_t, 4> grad_reshaped_a,
const at::PackedTensorAccessor64<scalar_t, 4> permuted_grad_a,
const at::PackedTensorAccessor64<scalar_t, 1> p_a,
const at::PackedTensorAccessor64<scalar_t, 4> input_a,
const at::PackedTensorAccessor64<scalar_t, 4> permuted_input_a,
at::PackedTensorAccessor64<scalar_t, 2> d_p_expanded_a,
at::PackedTensorAccessor64<scalar_t, 4> d_input_a) {
const scalar_t p = p_a[0];
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2);
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
d_p_expanded_a[b][i] = (permuted_input_a[b][0][i][0] - input_a[b][0][i][0]) * grad_reshaped_a[b][0][i][0]
+ (permuted_input_a[b][0][i][1] - input_a[b][0][i][1]) * grad_reshaped_a[b][0][i][1]
+ (permuted_input_a[b][1][i][0] - input_a[b][1][i][0]) * grad_reshaped_a[b][1][i][0]
+ (permuted_input_a[b][1][i][1] - input_a[b][1][i][1]) * grad_reshaped_a[b][1][i][1];
d_input_a[b][i][0][0] = (1 - p) * grad_a[b][i][0][0] + p * permuted_grad_a[b][i][0][0];
d_input_a[b][i][0][1] = (1 - p) * grad_a[b][i][0][1] + p * permuted_grad_a[b][i][0][1];
d_input_a[b][i][1][0] = (1 - p) * grad_a[b][i][1][0] + p * permuted_grad_a[b][i][1][0];
d_input_a[b][i][1][1] = (1 - p) * grad_a[b][i][1][1] + p * permuted_grad_a[b][i][1][1];
}
}
}
void permutation_factor_even_odd_multiply_backward_cuda(const at::Tensor& grad, const at::Tensor& p, const at::Tensor& input,
at::Tensor& d_p_expanded, at::Tensor& d_input) {
const auto batch_size = input.size(0);
const auto n = input.size(1);
dim3 block;
block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2);
block.y = div_up(MAX_BLOCK_SIZE, block.x);
dim3 grid(div_up(n / 2, block.x), div_up(batch_size, block.y * WORK_PER_THREAD));
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_even_odd_multiply_backward", [&] {
const auto p_a = p.packed_accessor64<scalar_t, 1>();
auto d_p_expanded_a = d_p_expanded.packed_accessor64<scalar_t, 2>();
switch (input.dim()) {
case 2: // real
{
const auto permuted_input = input.reshape({batch_size, n / 2, 2}).transpose(1, 2);
const auto input_folded = input.reshape({batch_size, 2, n / 2});
const auto grad_reshaped = grad.reshape({batch_size, 2, n / 2});
const auto permuted_grad = grad.reshape({batch_size, 2, n / 2}).transpose(1, 2);
const auto grad_folded = grad.reshape({batch_size, n / 2, 2});
d_input = d_input.view({batch_size, n / 2, 2});
// Accessors
const auto input_a = input_folded.packed_accessor64<scalar_t, 3>();
const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 3>();
const auto grad_reshaped_a = grad_reshaped.packed_accessor64<scalar_t, 3>();
const auto grad_a = grad_folded.packed_accessor64<scalar_t, 3>();
const auto permuted_grad_a = permuted_grad.packed_accessor64<scalar_t, 3>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 3>();
permutation_factor_even_odd_multiply_backward_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, grad_reshaped_a, permuted_grad_a, p_a, input_a, permuted_input_a, d_p_expanded_a, d_input_a);
d_input = d_input.view({batch_size, n});
break;
}
case 3: // complex
{
const auto permuted_input = input.reshape({batch_size, n / 2, 2, 2}).transpose(1, 2);
const auto input_folded = input.reshape({batch_size, 2, n / 2, 2});
const auto grad_reshaped = grad.reshape({batch_size, 2, n / 2, 2});
const auto permuted_grad = grad.reshape({batch_size, 2, n / 2, 2}).transpose(1, 2);
const auto grad_folded = grad.reshape({batch_size, n / 2, 2, 2});
      d_input = d_input.view({batch_size, n / 2, 2, 2});
// Accessors
const auto input_a = input_folded.packed_accessor64<scalar_t, 4>();
const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 4>();
const auto grad_reshaped_a = grad_reshaped.packed_accessor64<scalar_t, 4>();
const auto grad_a = grad_folded.packed_accessor64<scalar_t, 4>();
const auto permuted_grad_a = permuted_grad.packed_accessor64<scalar_t, 4>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 4>();
permutation_factor_even_odd_multiply_complex_backward_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, grad_reshaped_a, permuted_grad_a, p_a, input_a, permuted_input_a, d_p_expanded_a, d_input_a);
d_input = d_input.view({batch_size, n, 2});
break;
}
default:
AT_ERROR("permutation_factor_even_odd_multiply_backward requires input dimension 2 or 3");
}
});
  const auto err = cudaGetLastError();  // read the error once: a second call would return cudaSuccess because the flag is cleared
  TORCH_CHECK(err == cudaSuccess,
              "permutation_factor_even_odd_multiply_backward_cuda failed with error code ", err);
}
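// Forward kernel for the reverse permutation factor (real case): for each half j of the input,
// element i is blended with its mirrored element n - 1 - i using the weights (1 - p[j], p[j]).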
template <typename scalar_t>
__global__ void permutation_factor_reverse_multiply_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a,
const at::PackedTensorAccessor64<scalar_t, 3> input_a,
at::PackedTensorAccessor64<scalar_t, 3> output_a) {
const scalar_t p[2] = {p_a[0], p_a[1]};
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2); // already divided by 2
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) {
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
#pragma unroll
for (int j = 0; j <= 1; ++j) {
const scalar_t in[2] = {input_a[b][j][i], input_a[b][j][n - 1 - i]};
output_a[b][j][i] = (1 - p[j]) * in[0] + p[j] * in[1];
output_a[b][j][n - 1 - i] = p[j] * in[0] + (1 - p[j]) * in[1];
}
}
}
}
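// Complex variant of the reverse permutation forward kernel: applies the same mirrored blend to
// the real and imaginary components independently.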
template <typename scalar_t>
__global__ void permutation_factor_reverse_multiply_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a,
const at::PackedTensorAccessor64<scalar_t, 4> input_a,
at::PackedTensorAccessor64<scalar_t, 4> output_a) {
const scalar_t p[2] = {p_a[0], p_a[1]};
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2);
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) {
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
#pragma unroll
for (int j = 0; j <= 1; ++j) {
#pragma unroll
for (int k = 0; k <= 1; ++k) {
const scalar_t in[2] = {input_a[b][j][i][k], input_a[b][j][n - 1 - i][k]};
output_a[b][j][i][k] = (1 - p[j]) * in[0] + p[j] * in[1];
output_a[b][j][n - 1 - i][k] = p[j] * in[0] + (1 - p[j]) * in[1];
}
}
}
}
}
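// Host launcher for the reverse permutation forward pass; folds the input into (batch, 2, n/2[, 2])
// and dispatches to the real or complex kernel based on input.dim().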
void permutation_factor_reverse_multiply_cuda(const at::Tensor& p, const at::Tensor& input, at::Tensor& output) {
const auto batch_size = input.size(0);
const auto n = input.size(1);
dim3 block;
block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2);
block.y = div_up(MAX_BLOCK_SIZE, block.x);
dim3 grid(div_up(n / 4, block.x), div_up(batch_size, block.y * WORK_PER_THREAD));
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_reverse_multiply", [&] {
const auto p_a = p.packed_accessor64<scalar_t, 1>();
switch (input.dim()) {
case 2: // real
{
const auto input_folded = input.reshape({batch_size, 2, n / 2});
output = output.view({batch_size, 2, n / 2});
const auto input_a = input_folded.packed_accessor64<scalar_t, 3>();
auto output_a = output.packed_accessor64<scalar_t, 3>();
permutation_factor_reverse_multiply_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(p_a, input_a, output_a);
output = output.view({batch_size, n});
break;
}
case 3: // complex
{
const auto input_folded = input.reshape({batch_size, 2, n / 2, 2});
output = output.view({batch_size, 2, n / 2, 2});
const auto input_a = input_folded.packed_accessor64<scalar_t, 4>();
auto output_a = output.packed_accessor64<scalar_t, 4>();
permutation_factor_reverse_multiply_complex_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(p_a, input_a, output_a);
output = output.view({batch_size, n, 2});
break;
}
default:
AT_ERROR("permutation_factor_reverse_multiply requires input dimension 2 or 3");
}
});
  const auto err = cudaGetLastError();  // read the error once: a second call would return cudaSuccess because the flag is cleared
  TORCH_CHECK(err == cudaSuccess,
              "permutation_factor_reverse_multiply_cuda failed with error code ", err);
}
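// Backward kernel for the reverse permutation factor (real case): for each half, d_p_expanded
// stores the per-element contribution (in[n-1-i] - in[i]) * (g[i] - g[n-1-i]), and d_input applies
// the same mirrored blend as the forward pass to the incoming gradient.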
template <typename scalar_t>
__global__ void permutation_factor_reverse_multiply_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> grad_a,
const at::PackedTensorAccessor64<scalar_t, 1> p_a,
const at::PackedTensorAccessor64<scalar_t, 3> input_a,
at::PackedTensorAccessor64<scalar_t, 3> d_p_expanded_a,
at::PackedTensorAccessor64<scalar_t, 3> d_input_a) {
const scalar_t p[2] = {p_a[0], p_a[1]};
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2);
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) {
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
#pragma unroll
for (int j = 0; j <= 1; ++j) {
const scalar_t in[2] = {input_a[b][j][i], input_a[b][j][n - 1 - i]};
const scalar_t g[2] = {grad_a[b][j][i], grad_a[b][j][n - 1 - i]};
d_p_expanded_a[j][b][i] = (in[1] - in[0]) * (g[0] - g[1]);
d_input_a[b][j][i] = (1 - p[j]) * g[0] + p[j] * g[1];
d_input_a[b][j][n - 1 - i] = p[j] * g[0] + (1 - p[j]) * g[1];
}
}
}
}
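// Complex variant of the reverse permutation backward kernel: sums the d_p contribution over the
// real and imaginary parts before storing it.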
template <typename scalar_t>
__global__ void permutation_factor_reverse_multiply_complex_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> grad_a,
const at::PackedTensorAccessor64<scalar_t, 1> p_a,
const at::PackedTensorAccessor64<scalar_t, 4> input_a,
at::PackedTensorAccessor64<scalar_t, 3> d_p_expanded_a,
at::PackedTensorAccessor64<scalar_t, 4> d_input_a) {
const scalar_t p[2] = {p_a[0], p_a[1]};
const auto batch_size = input_a.size(0);
const auto n = input_a.size(2);
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) {
for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
#pragma unroll
for (int j = 0; j <= 1; ++j) {
scalar_t d_p_expanded_temp = 0;
#pragma unroll
for (int k = 0; k <= 1; ++k) {
const scalar_t in[2] = {input_a[b][j][i][k], input_a[b][j][n - 1 - i][k]};
const scalar_t g[2] = {grad_a[b][j][i][k], grad_a[b][j][n - 1 - i][k]};
d_p_expanded_temp += (in[1] - in[0]) * (g[0] - g[1]);
d_input_a[b][j][i][k] = (1 - p[j]) * g[0] + p[j] * g[1];
d_input_a[b][j][n - 1 - i][k] = p[j] * g[0] + (1 - p[j]) * g[1];
}
d_p_expanded_a[j][b][i] = d_p_expanded_temp;
}
}
}
}
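// Host launcher for the reverse permutation backward pass; mirrors the forward launcher but also
// reshapes d_input and fills d_p_expanded with the per-element contributions to d_p.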
void permutation_factor_reverse_multiply_backward_cuda(const at::Tensor& grad, const at::Tensor& p, const at::Tensor& input,
at::Tensor& d_p_expanded, at::Tensor& d_input) {
const auto batch_size = input.size(0);
const auto n = input.size(1);
dim3 block;
block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2);
block.y = div_up(MAX_BLOCK_SIZE, block.x);
dim3 grid(div_up(n / 4, block.x), div_up(batch_size, block.y * WORK_PER_THREAD));
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_reverse_multiply_backward", [&] {
const auto p_a = p.packed_accessor64<scalar_t, 1>();
auto d_p_expanded_a = d_p_expanded.packed_accessor64<scalar_t, 3>();
switch (input.dim()) {
case 2: // real
{
const auto input_folded = input.reshape({batch_size, 2, n / 2});
const auto grad_folded = grad.reshape({batch_size, 2, n / 2});
      d_input = d_input.view({batch_size, 2, n / 2});
// Accessors
const auto input_a = input_folded.packed_accessor64<scalar_t, 3>();
const auto grad_a = grad_folded.packed_accessor64<scalar_t, 3>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 3>();
permutation_factor_reverse_multiply_backward_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, p_a, input_a, d_p_expanded_a, d_input_a);
d_input = d_input.view({batch_size, n});
break;
}
case 3: // complex
{
const auto input_folded = input.reshape({batch_size, 2, n / 2, 2});
const auto grad_folded = grad.reshape({batch_size, 2, n / 2, 2});
      d_input = d_input.view({batch_size, 2, n / 2, 2});
// Accessors
const auto input_a = input_folded.packed_accessor64<scalar_t, 4>();
const auto grad_a = grad_folded.packed_accessor64<scalar_t, 4>();
auto d_input_a = d_input.packed_accessor64<scalar_t, 4>();
permutation_factor_reverse_multiply_complex_backward_cuda_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, p_a, input_a, d_p_expanded_a, d_input_a);
d_input = d_input.view({batch_size, n, 2});
break;
}
default:
AT_ERROR("permutation_factor_reverse_multiply_backward requires input dimension 2 or 3");
}
});
  const auto err = cudaGetLastError();  // read the error once: a second call would return cudaSuccess because the flag is cleared
  TORCH_CHECK(err == cudaSuccess,
              "permutation_factor_reverse_multiply_backward_cuda failed with error code ", err);
}
/*
 * This sample demonstrates a combination of Peer-to-Peer (P2P) and
* Unified Virtual Address Space (UVA) features new to SDK 4.0
*/
#include <shrUtils.h>
// includes, system
#include <stdlib.h>
#include <stdio.h>
// includes, project
#include <sdkHelper.h> // helper for shared that are common to CUDA SDK samples
#include <shrQATest.h> // This is for automated testing output (--qatest)
// CUDA includes
#include <cuda_runtime.h>
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
        exit(-1);
}
checkCudaErrors( cudaSetDevice(devID) );
printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceCount( &device_count );
// Find the best major SM Architecture GPU device
while ( current_device < device_count ) {
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while( current_device < device_count ) {
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if( compute_perf > max_compute_perf ) {
// If we find GPU with SM major > 2, search only these
if ( best_SM_arch > 2 ) {
                // If our device == best_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
cudaDeviceProp deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameters\n");
exit(-1);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors( cudaSetDevice( devID ) );
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name);
}
return devID;
}
// end of CUDA Helper Functions
__global__ void SimpleKernel(float *src, float *dst)
{
// Just a dummy kernel, doing enough for us to verify that everything
// worked
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
dst[idx] = src[idx] * 2.0f;
}
inline bool IsGPUCapableP2P(cudaDeviceProp *pProp)
{
#ifdef _WIN32
return (bool)(pProp->tccDriver ? true : false);
#else
return (bool)(pProp->major >= 2);
#endif
}
inline bool IsAppBuiltAs64()
{
#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
return 1;
#else
return 0;
#endif
}
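// Test driver: selects two P2P-capable GPUs, enables peer access between them, benchmarks a
// ping-pong cudaMemcpy over the peer link, runs SimpleKernel across devices through UVA pointers,
// and verifies the results on the host.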
int main(int argc, char **argv)
{
shrQAStart(argc, argv);
if (!IsAppBuiltAs64()) {
printf("%s is only supported with on 64-bit OSs and the application must be built as a 64-bit target. Test is being waived.\n", argv[0]);
shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
exit(EXIT_SUCCESS);
}
// Number of GPUs
printf("Checking for multiple GPUs...\n");
int gpu_n;
checkCudaErrors(cudaGetDeviceCount(&gpu_n));
printf("CUDA-capable device count: %i\n", gpu_n);
if (gpu_n < 2)
{
printf("Two or more (SM 2.0) class GPUs are required for %s.\n", argv[0]);
printf("Waiving test.\n");
shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
exit(EXIT_SUCCESS);
}
// Query device properties
cudaDeviceProp prop[64];
    int gpuid[64]; // we want to find the first two GPUs that can support P2P
int gpu_count = 0; // GPUs that meet the criteria
for (int i=0; i < gpu_n; i++) {
checkCudaErrors(cudaGetDeviceProperties(&prop[i], i));
// Only boards based on Fermi can support P2P
if ((prop[i].major >= 2)
#ifdef _WIN32
// on Windows (64-bit), the Tesla Compute Cluster driver for windows must be enabled
&& prop[i].tccDriver
#endif
)
{
// This is an array of P2P capable GPUs
gpuid[gpu_count++] = i;
}
printf("> GPU%d = \"%15s\" %s capable of Peer-to-Peer (P2P)\n", i, prop[i].name, (IsGPUCapableP2P(&prop[i]) ? "IS " : "NOT"));
}
// Check for TCC for Windows
if (gpu_count < 2)
{
printf("\nThis sample requires two SM 2.0 GPUs to use P2P/UVA functionality.\n");
#ifdef _WIN32
printf("\nFor Windows Vista/Win7, a TCC driver must be installed and enabled to use P2P/UVA functionality.\n");
#endif
shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
exit(EXIT_SUCCESS);
}
#if CUDART_VERSION >= 4000
// Check possibility for peer access
printf("\nChecking GPU(s) for support of peer to peer memory access...\n");
int can_access_peer_0_1, can_access_peer_1_0;
// In this case we just pick the first two that we can support
checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer_0_1, gpuid[0], gpuid[1]));
checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer_1_0, gpuid[1], gpuid[0]));
// Output results from P2P capabilities
printf("> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[gpuid[0]].name, gpuid[0],
prop[gpuid[1]].name, gpuid[1] ,
can_access_peer_0_1 ? "Yes" : "No");
printf("> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[gpuid[1]].name, gpuid[1],
prop[gpuid[0]].name, gpuid[0],
can_access_peer_1_0 ? "Yes" : "No");
if (can_access_peer_0_1 == 0 || can_access_peer_1_0 == 0)
{
printf("Two or more SM 2.0 class GPUs are required for %s to run.\n", argv[0]);
printf("Support for UVA requires a GPU with SM 2.0 capabilities.\n");
printf("Peer to Peer access is not available between GPU%d <-> GPU%d, waiving test.\n", gpuid[0], gpuid[1]);
printf("PASSED\n");
exit(EXIT_SUCCESS);
}
// Enable peer access
printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[1], 0));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[0], 0));
// Check that we got UVA on both devices
printf("Checking GPU%d and GPU%d for UVA capabilities...\n", gpuid[0], gpuid[1]);
const bool has_uva = (prop[gpuid[0]].unifiedAddressing && prop[gpuid[1]].unifiedAddressing);
printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid[0]].name, gpuid[0], (prop[gpuid[0]].unifiedAddressing ? "Yes" : "No") );
printf("> %s (GPU%d) supports UVA: %s\n", prop[gpuid[1]].name, gpuid[1], (prop[gpuid[1]].unifiedAddressing ? "Yes" : "No") );
if (has_uva) {
printf("Both GPUs can support UVA, enabling...\n");
} else {
printf("At least one of the two GPUs does NOT support UVA, waiving test.\n");
printf("PASSED\n");
exit(EXIT_SUCCESS);
}
// Allocate buffers
const size_t buf_size = 1024 * 1024 * 16 * sizeof(float);
printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n", int(buf_size / 1024 / 1024), gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
float* g0;
checkCudaErrors(cudaMalloc(&g0, buf_size));
checkCudaErrors(cudaSetDevice(gpuid[1]));
float* g1;
checkCudaErrors(cudaMalloc(&g1, buf_size));
float* h0;
checkCudaErrors(cudaMallocHost(&h0, buf_size)); // Automatically portable with UVA
// Create CUDA event handles
printf("Creating event handles...\n");
cudaEvent_t start_event, stop_event;
float time_memcpy;
int eventflags = cudaEventBlockingSync;
checkCudaErrors(cudaEventCreateWithFlags(&start_event, eventflags));
checkCudaErrors(cudaEventCreateWithFlags(&stop_event, eventflags));
// P2P memcopy() benchmark
checkCudaErrors(cudaEventRecord(start_event, 0));
for (int i=0; i<100; i++)
{
// With UVA we don't need to specify source and target devices, the
// runtime figures this out by itself from the pointers
// Ping-pong copy between GPUs
if (i % 2 == 0)
checkCudaErrors(cudaMemcpy(g1, g0, buf_size, cudaMemcpyDefault));
else
checkCudaErrors(cudaMemcpy(g0, g1, buf_size, cudaMemcpyDefault));
}
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event, stop_event));
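    // Effective bandwidth: 100 copies of buf_size bytes divided by the elapsed time, reported in GB/s.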
printf("cudaMemcpyPeer / cudaMemcpy between GPU%d and GPU%d: %.2fGB/s\n", gpuid[0], gpuid[1],
(1.0f / (time_memcpy / 1000.0f)) * ((100.0f * buf_size)) / 1024.0f / 1024.0f / 1024.0f);
// Prepare host buffer and copy to GPU 0
printf("Preparing host buffer and memcpy to GPU%d...\n", gpuid[0]);
for (int i=0; i<buf_size / sizeof(float); i++)
{
h0[i] = float(i % 4096);
}
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaMemcpy(g0, h0, buf_size, cudaMemcpyDefault));
// Kernel launch configuration
const dim3 threads(512, 1);
const dim3 blocks((buf_size / sizeof(float)) / threads.x, 1);
// Run kernel on GPU 1, reading input from the GPU 0 buffer, writing
// output to the GPU 1 buffer
printf("Run kernel on GPU%d, taking source data from GPU%d and writing to GPU%d...\n",
gpuid[1], gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[1]));
SimpleKernel<<<blocks, threads>>> (g0, g1);
checkCudaErrors( cudaDeviceSynchronize() );
// Run kernel on GPU 0, reading input from the GPU 1 buffer, writing
// output to the GPU 0 buffer
printf("Run kernel on GPU%d, taking source data from GPU%d and writing to GPU%d...\n",
gpuid[0], gpuid[1], gpuid[0]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
SimpleKernel<<<blocks, threads>>> (g1, g0);
checkCudaErrors( cudaDeviceSynchronize() );
// Copy data back to host and verify
printf("Copy data back to host from GPU%d and verify results...\n", gpuid[0]);
checkCudaErrors(cudaMemcpy(h0, g0, buf_size, cudaMemcpyDefault));
int error_count = 0;
for (int i=0; i<buf_size / sizeof(float); i++)
{
// Re-generate input data and apply 2x '* 2.0f' computation of both
// kernel runs
if (h0[i] != float(i % 4096) * 2.0f * 2.0f)
{
printf("Verification error @ element %i: val = %f, ref = %f\n", i, h0[i], (float(i%4096)*2.0f*2.0f) );
if (error_count++ > 10)
break;
}
}
// Disable peer access (also unregisters memory for non-UVA cases)
printf("Enabling peer access...\n");
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[1]));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[0]));
// Cleanup and shutdown
printf("Shutting down...\n");
checkCudaErrors(cudaEventDestroy(start_event));
checkCudaErrors(cudaEventDestroy(stop_event));
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaFree(g0));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaFree(g1));
checkCudaErrors(cudaFreeHost(h0));
for( int i=0; i<gpu_n; i++ ) {
checkCudaErrors( cudaSetDevice(i) );
cudaDeviceReset();
}
shrQAFinishExit(argc, (const char **)argv, (error_count == 0) ? QA_PASSED : QA_FAILED);
#else // Using CUDA 3.2 or older
printf("simpleP2P requires CUDA 4.0 to build and run, waiving testing\n");
shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
#endif
}
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
namespace cudf {
namespace detail {
namespace {
/**
* @brief The enum specifying which sorting method to use (stable or unstable).
*/
enum class sort_method { STABLE, UNSTABLE };
// returns segment indices for each element for all segments.
// first segment begin index = 0, last segment end index = num_rows.
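// e.g. offsets = [0, 3, 5] with num_rows = 5 yields segment_ids = [1, 1, 1, 2, 2]: each row is
// assigned the index of the first offset strictly greater than the row index (hence the -1 shift).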
rmm::device_uvector<size_type> get_segment_indices(size_type num_rows,
column_view const& offsets,
rmm::cuda_stream_view stream)
{
rmm::device_uvector<size_type> segment_ids(num_rows, stream);
  auto offset_begin = offsets.begin<size_type>();  // assumes the offsets column already contains cumulative offsets.
auto offsets_minus_one = thrust::make_transform_iterator(
offset_begin, [offset_begin] __device__(auto i) { return i - 1; });
auto counting_iter = thrust::make_counting_iterator<size_type>(0);
thrust::lower_bound(rmm::exec_policy(stream),
offsets_minus_one,
offsets_minus_one + offsets.size(),
counting_iter,
counting_iter + segment_ids.size(),
segment_ids.begin());
return segment_ids;
}
std::unique_ptr<column> segmented_sorted_order_common(
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
sort_method sorting,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(segment_offsets.type() == data_type(type_to_id<size_type>()),
"segment offsets should be size_type");
// Get segment id of each element in all segments.
auto segment_ids = get_segment_indices(keys.num_rows(), segment_offsets, stream);
// insert segment id before all columns.
std::vector<column_view> keys_with_segid;
keys_with_segid.reserve(keys.num_columns() + 1);
keys_with_segid.push_back(
column_view(data_type(type_to_id<size_type>()), segment_ids.size(), segment_ids.data()));
keys_with_segid.insert(keys_with_segid.end(), keys.begin(), keys.end());
auto segid_keys = table_view(keys_with_segid);
auto prepend_default = [](auto const& vector, auto default_value) {
if (vector.empty()) return vector;
std::remove_cv_t<std::remove_reference_t<decltype(vector)>> pre_vector;
    pre_vector.reserve(vector.size() + 1);  // room for the prepended default plus the original elements
pre_vector.push_back(default_value);
pre_vector.insert(pre_vector.end(), vector.begin(), vector.end());
return pre_vector;
};
auto child_column_order = prepend_default(column_order, order::ASCENDING);
auto child_null_precedence = prepend_default(null_precedence, null_order::AFTER);
// return sorted order of child columns
return sorting == sort_method::STABLE
? detail::stable_sorted_order(
segid_keys, child_column_order, child_null_precedence, stream, mr)
: detail::sorted_order(
segid_keys, child_column_order, child_null_precedence, stream, mr);
}
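// Shared implementation for segmented_sort_by_key and its stable variant: computes the segmented
// sort order of the keys and gathers the value columns with it.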
std::unique_ptr<table> segmented_sort_by_key_common(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
sort_method sorting,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(values.num_rows() == keys.num_rows(),
"Mismatch in number of rows for values and keys");
auto sorted_order = sorting == sort_method::STABLE
? stable_segmented_sorted_order(keys,
segment_offsets,
column_order,
null_precedence,
stream,
rmm::mr::get_current_device_resource())
: segmented_sorted_order(keys,
segment_offsets,
column_order,
null_precedence,
stream,
rmm::mr::get_current_device_resource());
  // Gather the value columns according to the segmented sort order.
return detail::gather(values,
sorted_order->view(),
out_of_bounds_policy::DONT_CHECK,
detail::negative_index_policy::NOT_ALLOWED,
stream,
mr);
}
} // namespace
std::unique_ptr<column> segmented_sorted_order(table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return segmented_sorted_order_common(
keys, segment_offsets, column_order, null_precedence, sort_method::UNSTABLE, stream, mr);
}
std::unique_ptr<column> stable_segmented_sorted_order(
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return segmented_sorted_order_common(
keys, segment_offsets, column_order, null_precedence, sort_method::STABLE, stream, mr);
}
std::unique_ptr<table> segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return segmented_sort_by_key_common(values,
keys,
segment_offsets,
column_order,
null_precedence,
sort_method::UNSTABLE,
stream,
mr);
}
std::unique_ptr<table> stable_segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return segmented_sort_by_key_common(
values, keys, segment_offsets, column_order, null_precedence, sort_method::STABLE, stream, mr);
}
} // namespace detail
std::unique_ptr<column> segmented_sorted_order(table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::segmented_sorted_order(
keys, segment_offsets, column_order, null_precedence, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> stable_segmented_sorted_order(
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::stable_segmented_sorted_order(
keys, segment_offsets, column_order, null_precedence, rmm::cuda_stream_default, mr);
}
std::unique_ptr<table> segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::segmented_sort_by_key(
values, keys, segment_offsets, column_order, null_precedence, rmm::cuda_stream_default, mr);
}
std::unique_ptr<table> stable_segmented_sort_by_key(table_view const& values,
table_view const& keys,
column_view const& segment_offsets,
std::vector<order> const& column_order,
std::vector<null_order> const& null_precedence,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::stable_segmented_sort_by_key(
values, keys, segment_offsets, column_order, null_precedence, rmm::cuda_stream_default, mr);
}
} // namespace cudf
#include "TemplateFactory.h"
#include <iostream>
#include <ctime>
#include <cstring>
using namespace std;
#include "ErrorCode.h"
// Macro: TF_ENABLE_KICK (enable the replacement policy)
// Switch macro. When it is enabled, once the template pool is full, newly created templates
// replace the less frequently used ones; this may slow down individual calls. When it is
// disabled, single-call performance improves, but the frequent allocation and release of later
// templates puts pressure on memory management, so overall performance may drop. It is therefore
// advisable to disable this macro when only a few template kinds are used, and to enable it when
// many kinds are used.
#define TF_ENABLE_KICK
// Struct: TFTemplateVendor (template-producing machine)
// This struct defines the mechanism for manufacturing templates. It provides the interfaces for
// creating new templates, validating parameters, and so on. Every template kind implements an
// instance of this struct, fills in all of its function pointers, and registers the instance in
// the vendor pool, so that the external interface can reach the template-creating functions of
// each kind.
typedef struct TFTemplateVendor_st {
    bool (*isLegalParam)(dim3 size,             // Different template kinds place different
                         void *privated);       // requirements on their parameters; this
                                                // interface checks whether the given parameters
                                                // are legal for the current template kind. For
                                                // example, circular templates require the
                                                // diameter to be odd and at least 3. Returns
                                                // true when the parameters are legal.
    int (*getHashIndex)(dim3 size,              // Since different template kinds have different
                        void *privated);        // parameter structures, their hash algorithms
                                                // differ as well. This interface returns the
                                                // hash value of the given parameters, or an
                                                // error code if the parameters are illegal or an
                                                // error occurs. The returned hash value lies in
                                                // the range 0 to TF_VOL_SHAPE - 1.
    Template *(*createTemplate)(dim3 size,      // Returns a Template of the given size built
                                void *privated);// from the given parameters. Returns NULL if the
                                                // parameters are illegal or an error occurs
                                                // during the computation.
    bool (*isEqualSize)(dim3 size1,             // Checks whether two parameter sets are equal.
                        void *privated1,        // Since template kinds interpret their
                        dim3 size2,             // parameters differently, each kind supplies its
                        void *privated2);       // own comparison; if either parameter set is
                                                // illegal, the function always returns false.
    void *(*copyPrivated)(void *privated);      // The type-specific parameter privated is passed
                                                // as an opaque pointer for generality, but
                                                // storing an unstable external pointer in the
                                                // pool would be risky, so this interface copies
                                                // privated into a replica used only inside the
                                                // pool, which keeps the system stable.
    bool (*deletePrivated)(void *privated);     // Releases the memory of privated. Different
                                                // template kinds have different type-specific
                                                // parameters and therefore need different
                                                // release functions; this is used when a
                                                // template is evicted from the pool.
} TFTemplateVendor;
// Definitions of the different template kinds (the CLASS implementation is at the end of the
// file):
// Sample code for the standard template vendor and some helper functions:
// Host function: _stretchDigit (stretch a binary number)
// Stretches a binary number by inserting a number of 0 bits between its binary digits. This
// function is a basic building block for interleaving data. After stretching, only the low-order
// bits that still fit are kept; the high-order bits are discarded.
static __host__ unsigned int     // The stretched result.
_stretchDigit(
        unsigned int num,        // Original value.
        int extent               // Degree of stretching, i.e. the number of 0 bits inserted
                                 // between adjacent bits.
);
// Host function: _combineDigit (interleave a three-component vector)
// Interleaves a three-component vector. Interleaving means arranging the three values bit by bit
// from low to high: bits 0 to 2 of the result are bit 0 of the x, y, and z components, bits 3 to
// 5 are bit 1 of the x, y, and z components, and so on. The number of interleaved components can
// be chosen: 1, 2, or 3, meaning interleave only x (i.e. no interleaving), interleave x and y, or
// interleave x, y, and z.
static __host__ unsigned int     // The interleaved result.
_combineDigit(
        dim3 num,                // Input data, a three-component vector.
        int count                // Number of components to interleave; valid values are 1, 2,
                                 // and 3. For values smaller than 1 the function simply returns
                                 // the x component of num; values larger than 3 are treated
                                 // as 3.
);
// Host 函数:_defIsLegalParam(标准参数判断函数)
// 给出一种一般情况下判断参数是否为合法的函数,对于没有特殊要求的模板类型,可以
// 直接使用该函数而不需要自己再重新写一个函数。
static __host__ bool // 返回值:由于没有给定具体的模板类型,该函数会恒返回真
_defIsLegalParam(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_defGetHashIndex(标准 Hash 算法函数)
// 该函数根据 size 的三维数据进行混合,然后通过取模运算得到合理的 Hash 值,该函
// 数并不将 private 的值考虑进 Hash 值的计算过程中。
static __host__ int // 返回值:Hash 值,如果出现错误则该函数返回负数。
_defGetHashIndex(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_defCreateTemplate(标准模板生成函数)
// 该函数只是用来作为演示这一类函数的书写格式,并不会真正的返回一个模板,该函数
// 只会返回 NULL。
static __host__ Template * // 返回值:生成的模板,但该函数只会返回 NULL。
_defCreateTemplate(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_defIsEqualSize(标准的尺寸相等判断函数)
// 该函数是标准的判断两个尺寸是否相等的函数。该函数通过比较两个尺寸参数的各个维
// 度上是否相等,以及两个专属参数是否地址相同来判断两个尺寸是否相同,这是一种最
// 通常的判断方式。
static __host__ bool // 返回值:给定的两个尺寸是否是一样的。
_defIsEqualSize(
dim3 size1, // 第一个尺寸的尺寸参数
void *privated1, // 第一个尺寸的专属参数
dim3 size2, // 第二个尺寸的尺寸参数
void *privated2 // 第二个尺寸的专属参数
);
// Host 函数:_defCopyPrivated(标准的专属参数拷贝函数)
// 该函数只是用来作为演示这一类函数的书写格式,并不会真正的进行拷贝工作,返回的
// 指针恒为 NULL。
static __host__ void * // 返回值:拷贝后和输入参数内容完全一致的新的地址空间的
// 指针,但本函数只会返回 NULL。
_defCopyPrivated(
void *privated // 待拷贝的专属参数。
);
// Host 函数:_defDeletePrivated(标准的专属参数释放函数)
// 该函数只是用来作为演示这一类函数的书写格式。对于 NULL 参数,该函数不进行任何
// 操作,对于非 NULL 参数,该函数也不会进行任何处理,因为使用 delete 释放
// void * 型指针会产生 Warning。
static __host__ bool // 返回值:是否释放成功,该函数如果参数是 NULL 则返回
// false。
_defDeletePrivated(
void *privated // 待释放的专属函数
);
// Host function: _stretchDigit (stretch a binary number)
static __host__ unsigned int _stretchDigit(unsigned int num, int extent)
{
    // Since high-order bits must be discarded from the result, compute how many bits remain
    // valid for the given extent.
    int maxbitlim = 8 * sizeof (unsigned int) / (extent + 1);
    // Bit cursor used to mask out the bit at the current position.
    unsigned int bitvernier = 0x1;
    // Accumulator for the result.
    unsigned int resnum = 0x0;
    // Iterate from the lowest bit upward, handling one bit per iteration and writing it into the
    // result.
    for (int i = 0; i < maxbitlim; i++) {
        // Mask out the current bit with the cursor, then shift it left so that extent zero bits
        // appear between it and its neighbour in the result.
        resnum |= ((num & bitvernier) << (extent * i));
        // Shift the cursor left by one so the next iteration handles the next higher bit.
        bitvernier <<= 1;
    }
    // Done; return the result.
    return resnum;
}
// Host function: _combineDigit (interleave a three-component vector)
static __host__ unsigned int _combineDigit(dim3 num, int count)
{
    // Accumulator for the output.
    unsigned int res = 0u;
    // If count <= 1 no processing is needed; return num.x directly.
    if (count <= 1)
        return num.x;
    // If count >= 3, clamp it to 3 and handle num.z first (stretched and then shifted into its
    // position).
    if (count >= 3) {
        count = 3;
        res = (_stretchDigit(num.z, count) << 2);
    }
    // At this point count >= 2, so stretch num.x and num.y and combine them.
    res |= _stretchDigit(num.x, count);
    res |= (_stretchDigit(num.y, count) << 1);
    // Done; return the result.
    return res;
}
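// Worked example (traced from the code above): _combineDigit({1, 1, 1}, 3) == 0b111 == 7, and
// _combineDigit({2, 1, 0}, 3) == 0b10010 == 18, since bit i of each component lands at result bit
// i * (count + 1) plus the component offset, leaving one unused bit position per group.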
// Host 全局变量:_defTemplateVendor(标准模板生成器)
// 归纳定义标准模板生成所需要的函数。
static TFTemplateVendor _defTemplateVendor = {
_defIsLegalParam,
_defGetHashIndex,
_defCreateTemplate,
_defIsEqualSize,
_defCopyPrivated,
_defDeletePrivated
};
// Host 函数:_defIsLegalParam(标准参数判断函数)
static __host__ bool _defIsLegalParam(dim3/* size*/, void * /*privated*/)
{
// 该函数直接返回
return true;
}
// Host 函数:_defGetHashIndex(标准 Hash 算法函数)
static __host__ int _defGetHashIndex(dim3 size, void * /*privated*/)
{
// 直接将三维的 size 混悬后的数据返回。
return _combineDigit(size, 3) % TF_VOL_SHAPE;
}
// Host 函数:_defCreateTemplate(标准模板生成函数)
static __host__ Template *_defCreateTemplate(dim3/* size*/,
void * /*privated*/)
{
// 该函数只是用来作为演示这一类函数的书写格式,并不会真正的返回一个模板,该
// 函数只会返回 NULL。
return NULL;
}
// Host 函数:_defIsEqualSize(标准的尺寸相等判断函数)
static __host__ bool _defIsEqualSize(dim3 size1, void *privated1,
dim3 size2, void *privated2)
{
// 本函数采用了一种最为通用的尺寸判断方式,即尺寸参数中各个维度要想等,并且
// 专属参数的地址值要相等,才能酸味两个尺寸的相等。在实际中具体对于某个类型
// 的模板来说,这个条件可能会进行一定程度的放宽。
if (size1.x == size2.x && size1.y == size2.y && size1.z == size2.z &&
privated1 == privated2)
return true;
else
return false;
}
// Host 函数:_defCopyPrivated(标准的专属参数拷贝函数)
static __host__ void *_defCopyPrivated(void * /*privated*/)
{
// 本演示函数只会返回 NULL。
return NULL;
}
// Host 函数:_defDeletePrivated(标准的专属参数释放函数)
static __host__ bool _defDeletePrivated(void *privated)
{
// 如果输入的参数是 NULL,则直接返回。
if (privated == NULL)
return false;
// 使用 delete 关键字释放 privated,然后返回。
//delete privated;
return true;
}
// 方形模板的定义:
// Host 函数:_boxIsLegalParam(方形模板参数判断函数)
// 检查方形模板的参数是否合格,合格的模板要求尺寸参数的 z 分量为 1,专属参数为
// NULL(因为方形模板没有专属参数)
static __host__ bool // 返回值:如果模板合法,则返回 true,否则返回 false
_boxIsLegalParam(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_boxGetHashIndex(方形模板 Hash 算法函数)
// 方形模板的 Hash 算法。该算法混悬尺寸参数的 x 和 y 分量,由于方形模板没有专属
// 参数,所以在计算 Hash 的时候没有考虑专属参数。
static __host__ int // 返回值:Hash 值,如果出现错误则该函数返回负数。
_boxGetHashIndex(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_boxCreateTemplate(方形模板生成函数)
// 生成方形模板的函数。
static __host__ Template * // 返回值:生成的模板,若无法生成模板会返回 NULL。
_boxCreateTemplate(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_boxIsEqualSize(方形模板的尺寸相等判断函数)
// 方形模板使用了尺寸中的两个维度,因此该函数只会检查尺寸参数的 x 和 y 两个维度
// 是否相等。
static __host__ bool // 返回值:给定的两个尺寸是否是一样的。
_boxIsEqualSize(
dim3 size1, // 第一个尺寸的尺寸参数
void *privated1, // 第一个尺寸的专属参数
dim3 size2, // 第二个尺寸的尺寸参数
void *privated2 // 第二个尺寸的专属参数
);
// Host 函数:_boxCopyPrivated(方形模板的专属参数拷贝函数)
// 由于方形模板没有专属参数,并不会真正的进行拷贝工作,会直接返回 NULL。
static __host__ void * // 返回值:直接返回 NULL。
_boxCopyPrivated(
void *privated // 待拷贝的专属参数。
);
// Host 函数:_boxDeletePrivated(方形模板的专属参数释放函数)
// 由于方形模板没有专属参数,所以该函数不会释放任何的内存空间。如果给定的
// privated 不是 NULL,则该函数会直接返回 NULL。
static __host__ bool // 返回值:如果参数为 NULL 返回 true,否则返回 false。
_boxDeletePrivated(
void *privated // 待释放的专属函数
);
// Host 全局变量:_boxTemplateVendor(方形模板生成器)
// 归纳定义方形模板生成所需要的函数。
static TFTemplateVendor _boxTemplateVendor = {
_boxIsLegalParam,
_boxGetHashIndex,
_boxCreateTemplate,
_boxIsEqualSize,
_boxCopyPrivated,
_boxDeletePrivated
};
// Host 函数:_boxIsLegalParam(方形模板参数判断函数)
static __host__ bool _boxIsLegalParam(dim3 size, void *privated)
{
// 如果尺寸参数的 z 分量不等于 1,或者专属变量不为 NULL 则该判定该参数是非
// 法参数。
if (size.z != 1 || privated != NULL)
return false;
// 如果 x 和 y 分量的尺寸小于 1,该参数也会被判定为非法。
else if (size.x < 1 || size.y < 1)
return false;
else
return true;
}
// Host 函数:_boxGetHashIndex(方形模板 Hash 算法函数)
static __host__ int _boxGetHashIndex(dim3 size, void * /*privated*/)
{
// 混悬尺寸参数的 x 和 y 分量,由于方形模板没有专属参数,所以在计算 Hash 的
// 时候没有考虑专属参数。
return _combineDigit(size, 2) % TF_VOL_SHAPE;
}
// Host 函数:_boxCreateTemplate(方形模板生成函数)
static __host__ Template *_boxCreateTemplate(dim3 size, void * /*privated*/)
{
// 定义临时变指针量 boxtpl,用于模板返回值
Template *boxtpl;
// 申请一个新的模板
int errcode;
errcode = TemplateBasicOp::newTemplate(&boxtpl);
if (errcode != NO_ERROR)
return NULL;
// 计算模版中点的数量,并在 Host 上获得存储空间
int count = size.x * size.y;
errcode = TemplateBasicOp::makeAtHost(boxtpl, count);
if (errcode != NO_ERROR) {
TemplateBasicOp::deleteTemplate(boxtpl);
return NULL;
}
// 将坐标的指针和附加数据的指针取出,然后通过指针游标的方式写入数据
int *ptsdata = boxtpl->tplData;
float *attdata = ATTACHED_DATA(boxtpl);
// 计算方形模板中点集的范围。这里设定方形的中心点为原点。
int startc = -((size.x - 1) / 2), endc = size.x / 2;
int startr = -((size.y - 1) / 2), endr = size.y / 2;
// 为了使所有坐标点的附加数据加和值为 1,这里取坐标点数量的倒数为每个点的附
// 加数据。
float attdataconst = 1.0f / count;
// 迭代赋值模板中的点集坐标数据和附加数据
for (int r = startr; r < endr; r++) {
for (int c = startc; c < endc; c++) {
*(ptsdata++) = c;
*(ptsdata++) = r;
*(attdata++) = attdataconst;
}
}
// 返回方形模板指针
return boxtpl;
}
// Host 函数:_boxIsEqualSize(方形模板的尺寸相等判断函数)
static __host__ bool _boxIsEqualSize(dim3 size1, void * /*privated1*/,
dim3 size2, void * /*privated2*/)
{
// 由于方形只有两维的尺寸,因此这里只考虑 x 和 y 分量
if (size1.x == size2.x && size1.y == size2.y)
return true;
else
return false;
}
// Host 函数:_boxCopyPrivated(方形模板的专属参数拷贝函数)
static __host__ void * _boxCopyPrivated(void * /*privated*/)
{
// 由于方形模板没有专属参数,因此不进行任何的拷贝工作,直接返回。
return NULL;
}
// Host 函数:_boxDeletePrivated(方形模板的专属参数释放函数)
static __host__ bool _boxDeletePrivated(void *privated)
{
// 由于方形模板没有专属参数,因此不进行任何的内存释放,只是象征性的进行一下
// 判断和返回结果。
if (privated == NULL)
return true;
else
return false;
}
// 圆形模板的定义:
// Host 函数:_circleIsLegalParam(圆形模板参数判断函数)
// 检查圆形模板的参数是否合格,合格的模板要求尺寸参数的 x 和 y 分量必须相等,且
// 大于等于 3,z 分量为 1,专属参数为 NULL(因为圆形模板没有专属参数)
static __host__ bool // 返回值:如果模板合法,则返回 true,否则返回 false
_circleIsLegalParam(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_circleGetHashIndex(圆形模板 Hash 算法函数)
// 圆形模板的 Hash 算法。该函数只是将尺寸参数的 x 分量取模。
static __host__ int // 返回值:Hash 值,如果出现错误则该函数返回负数。
_circleGetHashIndex(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_circleCreateTemplate(圆形模板生成函数)
// 生成圆形模板的函数。
static __host__ Template * // 返回值:生成的模板,若无法生成模板会返回 NULL。
_circleCreateTemplate(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_circleIsEqualSize(圆形模板的尺寸相等判断函数)
// 圆形模板使用了尺寸中的两个维度,因此该函数只会检查尺寸参数的 x 维度是否相
// 等。
static __host__ bool // 返回值:给定的两个尺寸是否是一样的。
_circleIsEqualSize(
dim3 size1, // 第一个尺寸的尺寸参数
void *privated1, // 第一个尺寸的专属参数
dim3 size2, // 第二个尺寸的尺寸参数
void *privated2 // 第二个尺寸的专属参数
);
// Host 函数:_circleCopyPrivated(圆形模板的专属参数拷贝函数)
// 由于圆形模板没有专属参数,并不会真正的进行拷贝工作,会直接返回 NULL。
static __host__ void * // 返回值:直接返回 NULL。
_circleCopyPrivated(
void *privated // 待拷贝的专属参数。
);
// Host 函数:_circleDeletePrivated(圆形模板的专属参数释放函数)
// 由于圆形模板没有专属参数,所以该函数不会释放任何的内存空间。如果给定的
// privated 不是 NULL,则该函数会直接返回 NULL。
static __host__ bool // 返回值:如果参数为 NULL 返回 true,否则返回 false。
_circleDeletePrivated(
void *privated // 待释放的专属函数
);
// Host 全局变量:_circleTemplateVendor(圆形模板生成器)
// 归纳定义圆形模板生成所需要的函数。
static TFTemplateVendor _circleTemplateVendor = {
_circleIsLegalParam,
_circleGetHashIndex,
_circleCreateTemplate,
_circleIsEqualSize,
_circleCopyPrivated,
_circleDeletePrivated
};
// Host 函数:_circleIsLegalParam(圆形模板参数判断函数)
static __host__ bool _circleIsLegalParam(dim3 size, void *privated)
{
// 如果尺寸参数的 z 分量不等于 1,或者专属变量不为 NULL 则该判定该参数是非
// 法参数。
if (size.z != 1 || privated != NULL)
return false;
// 如果 x 和 y 分量的尺寸小于 3,或者 x 和 y 分量不想等,该参数也会被判定为
// 非法。
else if (size.x < 3 || /*size.y < 3 || */size.x != size.y)
return false;
// 如果尺寸参数为偶数,该参数也会被判定为非法。
else if (size.x % 2 == 0/* || size.y % 2 == 0*/)
return false;
else
return true;
}
// Host 函数:_circleGetHashIndex(圆形模板 Hash 算法函数)
static __host__ int _circleGetHashIndex(dim3 size, void * /*privated*/)
{
// 这里只是用了 x 分量,由于只可能是 x 大于 2 的奇数,因此这里将其除以 2,
// 可以更高效的利用空间。
return ((size.x - 1) / 2) % TF_VOL_SHAPE;
}
// Host 函数:_circleCreateTemplate(圆形模板生成函数)
static __host__ Template *_circleCreateTemplate(dim3 size, void * /*privated*/)
{
// 得到圆形模板半径
int radius = (size.x - 1) / 2;
radius = ((radius < 1) ? 1 : radius);
int radius2 = radius * radius;
// 声明一些局部变量,用来保存模板的临时值。这些变量包括 tmptpldata,用来保
// 存临时的坐标点信息,这段内存空间申请大小为所求圆形的外接矩形的大小;
// count 为游标,记录下一个 tmptpldata 的存储下标,当整个求解完成后,该值存
// 储信息为整个圆形模板所占用的内存字数;x 和 y 为当前迭代的坐标,其起始位
// 置为 (0, radius)。
size_t maxdatasize = 2 * (2 * radius + 1) * (2 * radius + 1);
int *tmptpldata = new int[maxdatasize];
int count = 0;
int x = 0, y = radius;
// 如果临时数据空间申请失败,则无法进行后续的操作,则只能报错返回。
if (tmptpldata == NULL)
return NULL;
// 整个迭代过程采用经典的八分圆迭代法,即只迭代推导出圆的右上 1/8 部分(依
// 右手坐标来说),即从 (0, raidus) 至 (sqrt(radius), sqrt(radius)) 段的
// 点,其余的点都通过圆自身的对称性映射得到。如果迭代得到 (x, y) 为圆上一
// 点,那么 (-x, y)、(x, -y)、(-x, -y)、(y, x)、(-y, x)、(y, -x)、(-y, -x)
// 也都将是圆上的点。由于本算法要得到一个实心圆体,所以,每得到一对关于 x
// 轴的对称点后,则填充之间的所有点。
// 这是算法的第一步,将 (0, radius) 和 (0, -radius) 计入其中。之所以要特殊
// 对待是因为没有 0 和 -0 之区别,下段代码的 for 循环也是处理特殊的 0 点。
tmptpldata[count++] = 0;
tmptpldata[count++] = radius;
tmptpldata[count++] = 0;
tmptpldata[count++] = -radius;
// 计入 (radius, 0) 和 (-radius, 0) 并填充该行内这两点间的所有点。
for (int ix = -radius; ix <= radius; ix++) {
tmptpldata[count++] = ix;
tmptpldata[count++] = 0;
}
// 当 x < y 时,(x, y) 处于圆的右上方 1/8 的部分,我们只计算此 1/8 的部分,
// 其他的部分通过圆的对称性计算出来。
while (x < y) {
// 计算下一个点。这里对于下一个点只有两种可能,一种是 (x + 1, y),另一
// 种是 (x + 1, y - 1)。具体选择这两种中的哪一个,要看它们谁更接近真实
// 的圆周曲线。这段代码就是计算这两种情况距离圆周曲线的距离平方(开平方
// 计算太过复杂,却不影响这里的结果,因此我们没有进行开平方计算,而使用
// 距离的平方值作为判断的条件)。
x++;
int d1 = x * x + y * y - radius2;
int d2 = x * x + (y - 1) * (y - 1) - radius2;
d1 = ((d1 < 0) ? -d1 : d1);
d2 = ((d2 < 0) ? -d2 : d2);
// 比较两个候选点相距圆周曲线的距离
if (d1 < d2) {
// 如果点 (x + 1, y) 更加接近圆周曲线,则将首先将 90 度对称点的四个
// 点写入到坐标集中,这里没有进行内部的填充,是水平方向上的内部点已
// 经在前些步骤时被填充了
tmptpldata[count++] = x;
tmptpldata[count++] = y;
tmptpldata[count++] = -x;
tmptpldata[count++] = y;
tmptpldata[count++] = x;
tmptpldata[count++] = -y;
tmptpldata[count++] = -x;
tmptpldata[count++] = -y;
} else {
// 如果点 (x + 1, y - 1) 更加接近圆周曲线,则需要将 (-x - 1, y - 1)
// 和 (x + 1, y - 1),以及 (-x - 1, 1 - y) 和 (x + 1, 1 - y) 之间
// (含)的所有点都添加到坐标集中。
y--;
// 由于此前进行了 x++ 操作,所以 y-- 操作会导致 x > y 的情况产生,
// 显然 x > y 的情况都已经被其他的 45 度对称点所处理,因此,这里需
// 惊醒该检查,一旦发现 x > y 则直接跳出循环。
if (x > y)
break;
// 将对应的 y > 0 和 y < 0 所在的横向内部坐标点计入到坐标集中。
for (int ix = -x; ix <= x; ix++) {
tmptpldata[count++] = ix;
tmptpldata[count++] = y;
tmptpldata[count++] = ix;
tmptpldata[count++] = -y;
}
}
// 处理 45 度的各个对称点的情况,因为每走一步都有 x + 1 的操作,所以处
// 理 45 度对称点的时候都需要将对应的两点之间的横向内部点进行填充。但这
// 里有一个例外的情况,就是当 x >= y 时,该行的点已经在其他计算的 90 度
// 对称点中进行了处理,所有这些时候,就不需要在对 45 度对称点进行处理
// 了。
if (x < y) {
for (int iy = -y; iy <= y; iy++) {
tmptpldata[count++] = iy;
tmptpldata[count++] = x;
tmptpldata[count++] = iy;
tmptpldata[count++] = -x;
}
}
}
// 申请一个新的 Template 用来承装这些圆形模板中的坐标点集。
Template *restpl;
int errcode;
// 申请新的模板。
errcode = TemplateBasicOp::newTemplate(&restpl);
if (errcode != NO_ERROR) {
// 如果出现错误需要释放掉申请的临时空间以防止内存泄漏。
delete[] tmptpldata;
return NULL;
}
// 在 Device 内存上申请合适大小的空间,用来存放坐标数据。
int ptscnt = count / 2;
errcode = TemplateBasicOp::makeAtHost(restpl, ptscnt);
if (errcode != NO_ERROR) {
// 如果操作失败,需要释放掉之前申请的临时坐标集数据空间和模板数据结构,
// 以防止内存泄漏。
TemplateBasicOp::deleteTemplate(restpl);
delete[] tmptpldata;
return NULL;
}
// 将临时坐标集中的坐标数据拷贝到模板的坐标数据中。
memcpy(restpl->tplData, tmptpldata, count * sizeof (int));
// 取出模板的附加数据,然后为附加数据赋值为坐标点个数的倒数。
float attdataconst = 1.0f / ptscnt;
float *attdata = ATTACHED_DATA(restpl);
// 用迭代的方式将数据写入到附加数据中。
for (int i = 0; i < ptscnt; i++) {
*(attdata++) = attdataconst;
}
// 坐标数据已经拷贝到了需要返回给用户的模板中,因此,这个临时坐标集数据空间
// 已经不再需要了,因此需要将其释放掉。
delete[] tmptpldata;
// 处理完毕,返回已经装配好的模板。
return restpl;
}
// Host 函数:_circleIsEqualSize(圆形模板的尺寸相等判断函数)
static __host__ bool _circleIsEqualSize(dim3 size1, void * /*privated1*/,
dim3 size2, void * /*privated2*/)
{
// 由于圆形只有一维的尺寸,因此这里只考虑 x 分量
if (size1.x == size2.x/* && size1.y == size2.y*/)
return true;
else
return false;
}
// Host 函数:_circleCopyPrivated(圆形模板的专属参数拷贝函数)
static __host__ void * _circleCopyPrivated(void * /*privated*/)
{
// 由于圆形模板没有专属参数,因此不进行任何的拷贝工作,直接返回。
return NULL;
}
// Host 函数:_circleDeletePrivated(圆形模板的专属参数释放函数)
static __host__ bool _circleDeletePrivated(void *privated)
{
// 由于圆形模板没有专属参数,因此不进行任何的内存释放,只是象征性的进行一下
// 判断和返回结果。
if (privated == NULL)
return true;
else
return false;
}
// 环形模板的定义:
// Host 函数:_arcIsLegalParam(环形模板参数判断函数)
// 检查环形模板的参数是否合格,合格的模板要求尺寸参数的 z 分量为 1,专属参数为
// NULL(因为环形模板没有专属参数);此外环形模板要求 x 和 y 分量尺寸必须大于等
// 于 1,且 y 分量应该大于 x 分量(用 y 分量来定义外环,x 分量来定义内环)。
static __host__ bool // 返回值:如果模板合法,则返回 true,否则返回 false
_arcIsLegalParam(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_arcGetHashIndex(环形模板 Hash 算法函数)
// 环形模板的 Hash 算法。该算法混悬尺寸参数的 x 和 y 分量,由于环形模板没有专属
// 参数,所以在计算 Hash 的时候没有考虑专属参数。
static __host__ int // 返回值:Hash 值,如果出现错误则该函数返回负数。
_arcGetHashIndex(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_arcCreateTemplate(环形模板生成函数)
// 生成环形模板的函数。
static __host__ Template * // 返回值:生成的模板,若无法生成模板会返回 NULL。
_arcCreateTemplate(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_arcIsEqualSize(环形模板的尺寸相等判断函数)
// 环形模板使用了尺寸中的两个维度,因此该函数只会检查尺寸参数的 x 和 y 两个维度
// 是否相等。
static __host__ bool // 返回值:给定的两个尺寸是否是一样的。
_arcIsEqualSize(
dim3 size1, // 第一个尺寸的尺寸参数
void *privated1, // 第一个尺寸的专属参数
dim3 size2, // 第二个尺寸的尺寸参数
void *privated2 // 第二个尺寸的专属参数
);
// Host 函数:_arcCopyPrivated(环形模板的专属参数拷贝函数)
// 由于环形模板没有专属参数,并不会真正的进行拷贝工作,会直接返回 NULL。
static __host__ void * // 返回值:直接返回 NULL。
_arcCopyPrivated(
void *privated // 待拷贝的专属参数。
);
// Host 函数:_arcDeletePrivated(环形模板的专属参数释放函数)
// 由于环形模板没有专属参数,所以该函数不会释放任何的内存空间。如果给定的
// privated 不是 NULL,则该函数会直接返回 NULL。
static __host__ bool // 返回值:如果参数为 NULL 返回 true,否则返回 false。
_arcDeletePrivated(
void *privated // 待释放的专属函数
);
// Host 全局变量:_arcTemplateVendor(环形模板生成器)
// 归纳定义环形模板生成所需要的函数。
static TFTemplateVendor _arcTemplateVendor = {
_arcIsLegalParam,
_arcGetHashIndex,
_arcCreateTemplate,
_arcIsEqualSize,
_arcCopyPrivated,
_arcDeletePrivated
};
// Host 函数:_arcIsLegalParam(环形模板参数判断函数)
static __host__ bool _arcIsLegalParam(dim3 size, void *privated)
{
// 如果尺寸参数的 z 分量不等于 1,或者专属变量不为 NULL 则该判定该参数是非
// 法参数。
if (size.z != 1 || privated != NULL)
return false;
// 如果 x 和 y 分量的尺寸小于 1,或者 x 分量大于 y 分量,该参数也会被判定为
// 非法。
else if (size.x < 1 || size.y <= size.x)
return false;
// 由于 size 表示直径,因此,这里要求两个同心圆的直径必须皆为奇数。
else if (size.x % 2 == 0 || size.y % 2 == 0)
return false;
else
return true;
}
// Host 函数:_arcGetHashIndex(环形模板 Hash 算法函数)
static __host__ int _arcGetHashIndex(dim3 size, void * /*privated*/)
{
// 混悬尺寸参数的 x 和 y 分量,由于方形模板没有专属参数,所以在计算 Hash 的
// 时候没有考虑专属参数。由于 x 和 y 分量肯定为奇数,为了保证 Hash 算法的满
// 满映射,这里分别将 x 和 y 尺寸分量右移一位,抛弃其最低位。
size.x >>= 1;
size.y >>= 1;
return _combineDigit(size, 2) % TF_VOL_SHAPE;
}
// Host 函数:_arcCreateTemplate(环形模板生成函数)
static __host__ Template *_arcCreateTemplate(dim3 size, void * /*privated*/)
{
// 计算得到环形模板内侧圆的半径,以及半径的平方值。
int sr = (size.x - 1) / 2;
sr = ((sr < 1) ? 1 : sr);
int sr2 = sr * sr;
// 计算得到环形模板外侧圆的半径,以及半径的平方值。
int lr = (size.y - 1) / 2;
lr = ((lr <= sr) ? (sr + 1) : lr);
int lr2 = lr * lr;
// 申请用于存放坐标点的临时空间,为了防止内存越界访问,我们申请了最大可能的
// 空间,即外侧圆的外接矩形的尺寸。
int maxsize = 2 * (2 * lr + 1) * (2 * lr + 1);
int *tmptpldata = new int[maxsize];
// 初始化迭代求点所必须的一些局部变量。
int count = 0; // 当前下标。随着各个坐标点的逐渐求出,该值不断递增,用来记
// 录下一个存储位置的下标值
int x = 0, sy = sr, ly = lr; // 算法依 x 为主迭代变量,自增 x 后求的合适的
// y 值,由于存在内外两侧圆形,因此,这里设定
// 两个 y 的变量,sy 表示内侧圆的 y 值,ly 表
// 示外侧圆的 y 值。
// 整个迭代过程与求解圆形模板的方式相同,采用八分圆方法,通过迭代求解 1 / 8
// 个圆形,然后通过圆的对称性得到圆的另外部分,在每求解一个坐标后,填充两个
// 圆之间的部分坐标。
// 由于内侧圆会比外侧圆更快的达到结束点,因此在达到结束点后,则内测圆取直线
// x - y = 0 上的点。这样在填充内部点的时候才不会重复处理,将多余的重复点加
// 入到坐标点集中。
// _|____ /
// |求解\ / <-- 直线 x - y = 0
// |区域 \/
// _|___ /\
// | \/ \
// | /\ \ <-- 外侧圆
// | / \ <-- 内侧圆
// | / | |
// _|/_____|__|__
// 需要实现处理坐标轴上的点,由于 0 不分正负,所以不能通过下面迭代 while 循
// 环的通用方法来实现。将两个半径之间的点加入坐标点集。
for (int y = sr; y < lr; y++) {
tmptpldata[count++] = 0;
tmptpldata[count++] = y;
tmptpldata[count++] = 0;
tmptpldata[count++] = -y;
tmptpldata[count++] = y;
tmptpldata[count++] = 0;
tmptpldata[count++] = -y;
tmptpldata[count++] = 0;
}
// 迭代直到外侧圆的 1 / 8 区域求解完毕。
while (x < ly) {
// 自加 x,然后分别求解内侧圆和外侧圆的 y 坐标。
x++;
// 如果内侧圆还没有求解完成,则需要在两个可能的 y 坐标中选择 y 坐标。
// 注意,由于上面的自加过程,x 已经更新为下一点的 x 坐标,但该判断语句
// 需要使用原来的 x 坐标,因此,这里使用 x - 1 做为判断变量。
if (x - 1 < sy) {
// 从两个可能的下一点坐标 (x + 1, y) 和 (x + 1, y - 1) 中选择一个更
// 加接近圆形方程的坐标点做为下一点的坐标。
int sd1 = abs(x * x + sy * sy - sr2);
int sd2 = abs(x * x + (sy - 1) * (sy - 1) - sr2);
sy = (sd1 <= sd2) ? sy : (sy - 1);
}
// 如果 x >= sy 说明内侧圆已经求解完毕,因此这时应该按照直线 x - y = 0
// 来计算,这才不会造成重复点。
if (x >= sy)
sy = x;
// 求解外侧圆的下一个点坐标,从两个可能的下一点坐标 (x + 1, y) 和
// (x + 1, y - 1) 中选择一个更加接近圆形方程的坐标点做为下一点的坐标。
int ld1 = abs(x * x + ly * ly - lr2);
int ld2 = abs(x * x + (ly - 1) * (ly - 1) - lr2);
ly = (ld1 <= ld2) ? ly : (ly - 1);
// 如果 x > ly 说明外侧圆已经求解完毕,因此跳出迭代。
if (x > ly)
break;
// 将两个圆(或者外侧圆与直线 x - y = 0)当前点之间的坐标写入到坐标点集
// 中。考虑到圆的对称性关系,这里将 8 个对称坐标点也同时写入了坐标点
// 集。
for (int y = sy; y < ly; y++) {
tmptpldata[count++] = x;
tmptpldata[count++] = y;
tmptpldata[count++] = -x;
tmptpldata[count++] = y;
tmptpldata[count++] = x;
tmptpldata[count++] = -y;
tmptpldata[count++] = -x;
tmptpldata[count++] = -y;
// 如果当前点的 x 和 y 相等,那么其关于直线 x - y = 0 或 x + y = 0
// 的对称点就是其自身,因此没有必要在次加入到坐标点集中。
if (x == y)
continue;
tmptpldata[count++] = y;
tmptpldata[count++] = x;
tmptpldata[count++] = -y;
tmptpldata[count++] = x;
tmptpldata[count++] = y;
tmptpldata[count++] = -x;
tmptpldata[count++] = -y;
tmptpldata[count++] = -x;
}
}
// 申请一个新的 Template 用来承装这些圆形模板中的坐标点集。
Template *restpl;
int errcode;
// 申请新的模板。
errcode = TemplateBasicOp::newTemplate(&restpl);
if (errcode != NO_ERROR) {
// 如果出现错误需要释放掉申请的临时空间以防止内存泄漏。
delete[] tmptpldata;
return NULL;
}
// 在 Device 内存上申请合适大小的空间,用来存放坐标数据。
int ptscnt = count / 2;
errcode = TemplateBasicOp::makeAtHost(restpl, ptscnt);
if (errcode != NO_ERROR) {
// 如果操作失败,需要释放掉之前申请的临时坐标集数据空间和模板数据结构,
// 以防止内存泄漏。
TemplateBasicOp::deleteTemplate(restpl);
delete[] tmptpldata;
return NULL;
}
// 将临时坐标集中的坐标数据拷贝到模板的坐标数据中。
memcpy(restpl->tplData, tmptpldata, count * sizeof (int));
// 取出模板的附加数据,然后为附加数据赋值为坐标点个数的倒数。
float attdataconst = 1.0f / ptscnt;
float *attdata = ATTACHED_DATA(restpl);
// 用迭代的方式将数据写入到附加数据中。
for (int i = 0; i < ptscnt; i++) {
*(attdata++) = attdataconst;
}
// 坐标数据已经拷贝到了需要返回给用户的模板中,因此,这个临时坐标集数据空间
// 已经不再需要了,因此需要将其释放掉。
delete[] tmptpldata;
// 处理完毕,返回已经装配好的模板。
return restpl;
}
// Host 函数:_arcIsEqualSize(环形模板的尺寸相等判断函数)
static __host__ bool _arcIsEqualSize(dim3 size1, void * /*privated1*/,
dim3 size2, void * /*privated2*/)
{
// 由于方形只有两维的尺寸,因此这里只考虑 x 和 y 分量
if (size1.x == size2.x && size1.y == size2.y)
return true;
else
return false;
}
// Host 函数:_arcCopyPrivated(环形模板的专属参数拷贝函数)
static __host__ void *_arcCopyPrivated(void * /*privated*/)
{
// 由于环形模板没有专属参数,因此不进行任何的拷贝工作,直接返回。
return NULL;
}
// Host 函数:_arcDeletePrivated(环形模板的专属参数释放函数)
static __host__ bool _arcDeletePrivated(void *privated)
{
// 由于环形模板没有专属参数,因此不进行任何的内存释放,只是象征性的进行一下
// 判断和返回结果。
if (privated == NULL)
return true;
else
return false;
}
// 高斯模板的定义:
// Host 函数:_gaussIsLegalParam(高斯模板参数判断函数)
// 检查高斯模板的参数是否合格,合格的模板要求尺寸参数的 z 分量为 1,专属参数不
// 能为NULL;此外高斯模板还要求 x 和 y 分量尺寸必须大于等于 1,且 y 分量必须等
// 于 x 分量;另外高斯模板要求尺寸必须为奇数。
static __host__ bool // 返回值:如果模板合法,则返回 true,否则返回 false
_gaussIsLegalParam(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_gaussGetHashIndex(高斯模板 Hash 算法函数)
// 高斯模板的 Hash 算法。该算法之使用尺寸参数的 x 分量,由于高斯模板使用专属
// 参数,所以在计算 Hash 的时候也考虑专属参数。
static __host__ int // 返回值:Hash 值,如果出现错误则该函数返回负数。
_gaussGetHashIndex(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_gaussCreateTemplate(高斯模板生成函数)
// 生成高斯模板的函数。
static __host__ Template * // 返回值:生成的模板,若无法生成模板会返回 NULL。
_gaussCreateTemplate(
dim3 size, // 尺寸参数
void *privated // 专属参数
);
// Host 函数:_gaussIsEqualSize(高斯模板的尺寸相等判断函数)
// 高斯模板使用了尺寸中的一个维度,因此该函数只会检查尺寸参数的 x 维度是否相
// 等。同时还检查了 privated 在 float 型数据的条件下是否相等(即两个数的差的绝
// 对值小于某一个很小的正数)
static __host__ bool // 返回值:给定的两个尺寸是否是一样的。
_gaussIsEqualSize(
dim3 size1, // 第一个尺寸的尺寸参数
void *privated1, // 第一个尺寸的专属参数
dim3 size2, // 第二个尺寸的尺寸参数
void *privated2 // 第二个尺寸的专属参数
);
// Host 函数:_gaussCopyPrivated(高斯模板的专属参数拷贝函数)
// 按照 float 型数据的方式,申请一个新的 float 型数据空间,并将 privated 所指向
// 的 float 型数据拷贝到新的空间中。
static __host__ void * // 返回值:如果 privated 为 NULL,则返回 NULL;否则返回
// 新申请的数据空间。
_gaussCopyPrivated(
void *privated // 待拷贝的专属参数。
);
// Host 函数:_gaussDeletePrivated(高斯模板的专属参数释放函数)
// 按照释放一个 float 型数据的地址空间的方法释放给定的空间。如果 privated 是
// NULL 则不进行任何操作。
static __host__ bool // 返回值:如果释放成功返回 true,对于 NULL 参数,返回
// false。
_gaussDeletePrivated(
void *privated // 待释放的专属函数
);
// Host 全局变量:_gaussTemplateVendor(高斯模板生成器)
// 归纳定义高斯模板生成所需要的函数。
static TFTemplateVendor _gaussTemplateVendor = {
_gaussIsLegalParam,
_gaussGetHashIndex,
_gaussCreateTemplate,
_gaussIsEqualSize,
_gaussCopyPrivated,
_gaussDeletePrivated
};
// Host 函数:_gaussIsLegalParam(高斯模板参数判断函数)
static __host__ bool _gaussIsLegalParam(dim3 size, void *privated)
{
// 由于高斯模板使用了 float 型的专属参数,因此需要保证 privated 不为 NULL。
if (privated == NULL)
return false;
// 由于只是用了一个维度的尺寸变量,这里要求 z 维度必须为 1。
else if (size.z != 1)
return false;
// 这里了要求尺寸必须大于等于 3 且 x 和 y 分量必须相等
else if (size.x < 3 || size.y != size.x)
return false;
// 这里还要求尺寸必须为奇数。
else if (size.x % 2 == 0)
return false;
// 如果附属数据的值等于 0,则判断为非法。
else if (fabs(*((float *)privated)) < 1.0e-8f)
return false;
else
return true;
}
// Host 函数:_gaussGetHashIndex(高斯模板 Hash 算法函数)
static __host__ int _gaussGetHashIndex(dim3 size, void *privated)
{
// 如果高斯模板的专属参数为 NULL,则返回 -1 报错。
if (privated == NULL)
return -1;
// 这里将尺寸参数的 x 分量和专属参数进行异或拼合。考虑到 size.x 为大于等于
// 3 的奇数,为了更加有效的利用存储空间,这里将 size.x / 2 - 1 以消除码距。
return ((size.x / 2 - 1) ^ (int)(fabs(*(float *)privated) * 10.0f)) %
TF_VOL_SHAPE;
}
// Host function: _gaussCreateTemplate (Gaussian template creation function)
static __host__ Template *_gaussCreateTemplate(dim3 size, void *privated)
{
// If the private parameter is NULL, report an error and return.
if (privated == NULL)
return NULL;
// Read the private parameter. It is a float value and is referred to as
// sigma during the construction of the Gaussian template.
float sigma = *((float *)privated);
// If sigma equals 0 the remaining computation cannot be carried out, so
// report an error and return.
if (fabs(sigma) < 1.0e-8f)
return NULL;
// Precompute 2 * sigma ^ 2 for later use.
float sigma22 = 2 * sigma * sigma;
// Compute the radius.
int radius = size.x / 2;
if (radius < 1)
radius = 1;
// Derive the edge length and the total number of points from the radius.
int edgelen = 2 * radius + 1;
int maxptscnt = edgelen * edgelen;
// Allocate a new template.
Template *restpl;
int errcode;
errcode = TemplateBasicOp::newTemplate(&restpl);
if (errcode != NO_ERROR)
return NULL;
// Allocate memory for the new template.
errcode = TemplateBasicOp::makeAtHost(restpl, maxptscnt);
if (errcode != NO_ERROR) {
TemplateBasicOp::deleteTemplate(restpl);
return NULL;
}
// Obtain the pointers to the coordinate data and the attached data so that
// the data can be filled in conveniently through cursor pointers.
int *tpldata = restpl->tplData;
float *attdata = ATTACHED_DATA(restpl);
// First add the origin to the template.
*(tpldata++) = 0;
*(tpldata++) = 0;
*(attdata++) = 1.0f;
// Then add the template data ring by ring from the inside out. Building the
// template from the inside out gives good reusability: the first i * i
// elements of the template are exactly the Gaussian template with edge
// length i.
for (int i = 1; i <= radius; i++) {
// Compute the coordinates for the current radius. By symmetry, the points of
// one edge are computed and the remaining points are obtained from them. To
// avoid counting the corner points twice, the range computed here is
// -i + 1 to i.
for (int j = -i + 1; j <= i; j++) {
// The four symmetric points share the same squared distance, so the attached
// value is precomputed to reduce the amount of computation.
float curatt = exp(-(i * i + j * j) / sigma22);
// Add the four symmetric points to the template together with the attached
// value.
*(tpldata++) = i;
*(tpldata++) = j;
*(attdata++) = curatt;
*(tpldata++) = -i;
*(tpldata++) = j;
*(attdata++) = curatt;
*(tpldata++) = j;
*(tpldata++) = i;
*(attdata++) = curatt;
*(tpldata++) = j;
*(tpldata++) = -i;
*(attdata++) = curatt;
}
}
// Done; return the new template.
return restpl;
}
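// Note (sketch of the bookkeeping above, not in the original source): each
// ring i contributes 2 * i values of j, and each (i, j) pair adds 4 symmetric
// points, i.e. 8 * i points per ring. Together with the origin this gives
//     1 + sum_{i=1}^{radius} 8 * i = (2 * radius + 1)^2 = maxptscnt,
// which is exactly the number of slots allocated by makeAtHost above.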
// Host function: _gaussIsEqualSize (Gaussian template size equality check)
static __host__ bool _gaussIsEqualSize(dim3 size1, void *privated1,
dim3 size2, void *privated2)
{
// If either private parameter is NULL, always return false to signal an
// error.
if (privated1 == NULL || privated2 == NULL)
return false;
// If the two private parameters, interpreted as float values, are not equal
// (i.e., the absolute difference exceeds a small positive threshold), the
// sizes are considered different.
if (fabs(*((float *)privated1) - *((float *)privated2)) >= 1.0e-6f)
return false;
// If the size parameters are not equal, the two sizes are considered
// different.
if (size1.x != size2.x/* || size1.y != size2.y*/)
return false;
return true;
}
// Host function: _gaussCopyPrivated (Gaussian template private parameter copy)
static __host__ void *_gaussCopyPrivated(void *privated)
{
// If the private parameter is NULL, return NULL directly.
if (privated == NULL)
return NULL;
// Allocate space for one float.
float *resptr = new float;
if (resptr == NULL)
return NULL;
// Copy the data into this space.
*resptr = *((float *)privated);
// Return the address of the newly allocated space holding the copied data.
return resptr;
}
// Host function: _gaussDeletePrivated (Gaussian template private parameter release)
static __host__ bool _gaussDeletePrivated(void *privated)
{
// If the private parameter is NULL, return false directly.
if (privated == NULL)
return false;
// Free the space pointed to by privated.
delete (float *)privated;
return true;
}
// Definition of the Euclidean template:
// Host function: _euclideIsLegalParam (Euclidean template parameter check)
// Checks whether the parameters of a Euclidean template are valid. A valid
// template requires the z component of the size parameter to be 1 and the
// private parameter to be non-NULL. In addition, the x and y components must
// be at least 1, and the y component must either equal the x component or be
// equal to 1.
static __host__ bool // Return value: true if the template is legal, false otherwise.
_euclideIsLegalParam(
dim3 size, // size parameter
void *privated // private parameter
);
// Host function: _euclideGetHashIndex (Euclidean template hash function)
// Hash algorithm for the Euclidean template. The algorithm only uses the x
// component of the size parameter; since the Euclidean template also uses a
// private parameter, that parameter is taken into account when computing the
// hash.
static __host__ int // Return value: the hash value; a negative value is returned on error.
_euclideGetHashIndex(
dim3 size, // size parameter
void *privated // private parameter
);
// Host function: _euclideCreateTemplate (Euclidean template creation function)
// Creates a Euclidean template.
static __host__ Template * // Return value: the created template, or NULL if it cannot be created.
_euclideCreateTemplate(
dim3 size, // size parameter
void *privated // private parameter
);
// Host function: _euclideIsEqualSize (Euclidean template size equality check)
// The Euclidean template uses only one dimension of the size, so this function
// only checks whether the x components of the two size parameters are equal.
// It also checks whether the two private parameters are equal when interpreted
// as float values (i.e., whether the absolute difference is below a small
// positive threshold).
static __host__ bool // Return value: whether the two given sizes are the same.
_euclideIsEqualSize(
dim3 size1, // size parameter of the first size
void *privated1, // private parameter of the first size
dim3 size2, // size parameter of the second size
void *privated2 // private parameter of the second size
);
// Host function: _euclideCopyPrivated (Euclidean template private parameter copy)
// Allocates a new float and copies the float value pointed to by privated into
// the newly allocated space.
static __host__ void * // Return value: NULL if privated is NULL; otherwise
// the newly allocated space.
_euclideCopyPrivated(
void *privated // private parameter to copy
);
// Host function: _euclideDeletePrivated (Euclidean template private parameter release)
// Frees the given space as the address of a single float. If privated is NULL,
// no operation is performed.
static __host__ bool // Return value: true if the release succeeds; false for a
// NULL argument.
_euclideDeletePrivated(
void *privated // private parameter to release
);
// Host global variable: _euclideTemplateVendor (Euclidean template vendor)
// Collects the functions needed to generate Euclidean templates.
static TFTemplateVendor _euclideTemplateVendor = {
_euclideIsLegalParam,
_euclideGetHashIndex,
_euclideCreateTemplate,
_euclideIsEqualSize,
_euclideCopyPrivated,
_euclideDeletePrivated
};
// Host function: _euclideIsLegalParam (Euclidean template parameter check)
static __host__ bool _euclideIsLegalParam(dim3 size, void *privated)
{
// The Euclidean template uses a float private parameter, so privated must not
// be NULL.
if (privated == NULL)
return false;
// Only one dimension of the size is used, so the z dimension must be 1.
else if (size.z != 1)
return false;
// The size must be at least 1, and the y component must either equal the x
// component or be equal to 1.
else if (size.x < 1 || (size.y != size.x && size.y != 1))
return false;
// If the attached value equals 0, the parameters are considered illegal.
else if (fabs(*((float *)privated)) < 1.0e-8f)
return false;
else
return true;
}
// Host function: _euclideGetHashIndex (Euclidean template hash function)
static __host__ int _euclideGetHashIndex(dim3 size, void *privated)
{
// If the private parameter of the Euclidean template is NULL, return -1 to
// signal an error.
if (privated == NULL)
return -1;
// Combine the x component of the size and the private parameter with XOR.
return (size.x ^ (int)(fabs(*(float *)privated) * 10.0f)) % TF_VOL_SHAPE;
}
// Host function: _euclideCreateTemplate (Euclidean template creation function)
static __host__ Template *_euclideCreateTemplate(dim3 size, void *privated)
{
// If the private parameter is NULL, report an error and return.
if (privated == NULL)
return NULL;
// If the size is smaller than 1, no meaningful computation is possible, so
// report an error and return.
if (size.x < 1)
return NULL;
// Read the private parameter. It is a float value and is referred to as
// sigma during the construction of the Euclidean template.
float sigma = *((float *)privated);
// If sigma equals 0 the remaining computation cannot be carried out, so
// report an error and return.
if (fabs(sigma) < 1.0e-8f)
return NULL;
// Precompute 2 * sigma ^ 2 for later use.
float sigma22 = 2 * sigma * sigma;
// Allocate a new template.
Template *restpl;
int errcode;
errcode = TemplateBasicOp::newTemplate(&restpl);
if (errcode != NO_ERROR)
return NULL;
// Allocate memory for the new template.
errcode = TemplateBasicOp::makeAtHost(restpl, size.x);
if (errcode != NO_ERROR) {
TemplateBasicOp::deleteTemplate(restpl);
return NULL;
}
// Obtain the pointers to the coordinate data and the attached data so that
// the data can be filled in conveniently through cursor pointers.
int *tpldata = restpl->tplData;
float *attdata = ATTACHED_DATA(restpl);
// Add the template data one point at a time.
for (int i = 0; i < size.x; i++) {
// Compute the attached value for the current point.
float curatt = exp(-(i * i) / sigma22);
// Add the point to the template together with the attached value.
*(tpldata++) = i;
*(tpldata++) = 0;
*(attdata++) = curatt;
}
// Done; return the new template.
return restpl;
}
// Host function: _euclideIsEqualSize (Euclidean template size equality check)
static __host__ bool _euclideIsEqualSize(dim3 size1, void *privated1,
dim3 size2, void *privated2)
{
// If either private parameter is NULL, always return false to signal an
// error.
if (privated1 == NULL || privated2 == NULL)
return false;
// If the two private parameters, interpreted as float values, are not equal
// (i.e., the absolute difference exceeds a small positive threshold), the
// sizes are considered different.
if (fabs(*((float *)privated1) - *((float *)privated2)) >= 1.0e-6f)
return false;
// If the size parameters are not equal, the two sizes are considered
// different.
if (size1.x != size2.x/* || size1.y != size2.y*/)
return false;
return true;
}
// Host function: _euclideCopyPrivated (Euclidean template private parameter copy)
static __host__ void *_euclideCopyPrivated(void *privated)
{
// If the private parameter is NULL, return NULL directly.
if (privated == NULL)
return NULL;
// Allocate space for one float.
float *resptr = new float;
if (resptr == NULL)
return NULL;
// Copy the data into this space.
*resptr = *((float *)privated);
// Return the address of the newly allocated space holding the copied data.
return resptr;
}
// Host function: _euclideDeletePrivated (Euclidean template private parameter release)
static __host__ bool _euclideDeletePrivated(void *privated)
{
// If the private parameter is NULL, return false directly.
if (privated == NULL)
return false;
// Free the space pointed to by privated.
delete (float *)privated;
return true;
}
// Bringing the individual template definitions together:
// Host global variable: _templateVendorArray (collection of template vendors)
// This array manages all template vendors in the system; the functions of any
// vendor can be reached flexibly through its index.
static TFTemplateVendor *_templateVendorArray[] = {
&_boxTemplateVendor, // box template vendor
&_circleTemplateVendor, // circle template vendor
&_arcTemplateVendor, // arc template vendor
&_gaussTemplateVendor, // Gaussian template vendor
&_euclideTemplateVendor, // Euclidean template vendor
&_defTemplateVendor // standard template vendor (code sample for demonstration)
};
// Implementation of the TemplateFactory CLASS methods
// Static member variable: tplPool (template pool)
// Initialize tplPool to NULL.
Template *
TemplateFactory::tplPool[TF_CNT_SHAPE][TF_VOL_SHAPE * TF_SET_SHAPE] = { NULL };
// Static member variable: sizePool (size parameters of the template pool)
// Initialize sizePool to (0, 0, 0).
dim3
TemplateFactory::sizePool[TF_CNT_SHAPE][TF_VOL_SHAPE * TF_SET_SHAPE] = {
dim3(0, 0, 0)
};
// Static member variable: privatePool (private parameters of the template pool)
// Initialize privatePool to NULL.
void *
TemplateFactory::privatePool[TF_CNT_SHAPE][TF_VOL_SHAPE * TF_SET_SHAPE] = {
NULL
};
// Static member variable: countPool (usage counters of the template pool)
// Initialize countPool to 0.
int
TemplateFactory::countPool[TF_CNT_SHAPE][TF_VOL_SHAPE * TF_SET_SHAPE] = { 0 };
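// Note on the pool layout (derived from getTemplate below, not in the
// original source): for a given shape, entries are grouped into buckets of
// TF_SET_SHAPE consecutive slots, and a template whose hash value is h lives
// somewhere in the slot range [h * TF_SET_SHAPE, (h + 1) * TF_SET_SHAPE).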
// Host static method: boostTemplateEntry (promote the given pool entry)
__host__ bool TemplateFactory::boostTemplateEntry(int shape, int idx)
{
// Check the shape and index arguments. Since this internal function is only
// called with arguments that are already known to be valid, the code below is
// commented out.
//if (shape < 0 || shape >= TF_CNT_SHAPE ||
// idx < 0 || idx >= TF_VOL_SHAPE * TF_SET_SHAPE)
// return false;
// If the index is divisible by the bucket size, it is the first index of a
// bucket and the entry cannot be promoted any further: promoting it would
// move it into another bucket and break the subsequent processing.
if (idx % TF_SET_SHAPE == 0)
return false;
// Swap the two pool entries.
// First save the entry at position idx into temporary storage.
Template *tmptpl = tplPool[shape][idx];
dim3 tmpsize = sizePool[shape][idx];
void *tmppriv = privatePool[shape][idx];
int tmpcount = countPool[shape][idx];
// Then move the entry at position idx - 1 to position idx.
tplPool[shape][idx] = tplPool[shape][idx - 1];
sizePool[shape][idx] = sizePool[shape][idx - 1];
privatePool[shape][idx] = privatePool[shape][idx - 1];
countPool[shape][idx] = countPool[shape][idx - 1];
// Finally put the saved entry, originally at idx, into position idx - 1.
tplPool[shape][idx - 1] = tmptpl;
sizePool[shape][idx - 1] = tmpsize;
privatePool[shape][idx - 1] = tmppriv;
countPool[shape][idx - 1] = tmpcount;
// Done; return.
return true;
}
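// Illustrative example (not in the original source): with TF_SET_SHAPE == 4
// (value chosen only for illustration), boostTemplateEntry(shape, 5) swaps
// slots 5 and 4, which both lie inside the second bucket, while
// boostTemplateEntry(shape, 4) returns false because slot 4 opens a bucket.
// This is the classic "transpose" heuristic for self-organizing lists:
// frequently used entries drift toward the front of their bucket.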
// Host static method: getTemplate (obtain the required template from its parameters)
__host__ int TemplateFactory::getTemplate(Template **tpl, int shape,
dim3 size, void *privated)
{
// Check that the given shape is legal.
if (shape < 0 || shape >= TF_CNT_SHAPE)
return INVALID_DATA;
// Check that the output template pointer is not NULL.
if (tpl == NULL)
return NULL_POINTER;
// Check that the size parameters are legal.
if (!_templateVendorArray[shape]->isLegalParam(size, privated))
return INVALID_DATA;
int hashidx; // hash index value
int posidx; // cursor index into the template pool, derived from the hash value
int startposidx; // first pool index at which the requested template may be stored
int endposidx; // index one past the last pool position at which the requested
// template may be stored
// Compute the index with the vendor's hash function.
hashidx = _templateVendorArray[shape]->getHashIndex(size, privated);
// If the hash value returned by the vendor is unusable, report an error.
if (hashidx < 0 || hashidx >= TF_VOL_SHAPE)
return INVALID_DATA;
// Derive the first and one-past-last pool positions from the hash value.
startposidx = hashidx * TF_SET_SHAPE;
endposidx = startposidx + TF_SET_SHAPE;
// Search the template pool for the requested template.
for (posidx = startposidx; posidx < endposidx; posidx++) {
// If the current pool entry is NULL, the requested template is not yet in
// the pool and has to be created.
if (tplPool[shape][posidx] == NULL) {
break;
} else if (_templateVendorArray[shape]->isEqualSize(
sizePool[shape][posidx], privatePool[shape][posidx],
size, privated)) {
// The requested template was found in the pool.
*tpl = tplPool[shape][posidx];
countPool[shape][posidx]++;
#ifdef TF_ENABLE_KICK
// If the selected template is not at the first position of its bucket, swap
// it with the entry in front of it so that it becomes less likely to be
// evicted. This code is only executed when the eviction mechanism is
// enabled.
boostTemplateEntry(shape, posidx);
#endif
// Return directly once the template has been found.
return NO_ERROR;
}
}
// If the requested template had been found in the pool, the function would
// already have returned through the else-if branch above, so the code below
// handles the case in which no matching template was found.
// First, create the template. It is created before touching the pool because
// if creation fails there is no point in trying to place it in the pool.
*tpl = _templateVendorArray[shape]->createTemplate(size, privated);
// Check whether the created template is NULL.
if (*tpl == NULL)
return OUT_OF_MEM;
// Then, if the bucket is full, one template must be evicted before the new
// one can be inserted. (If the eviction feature is disabled, this block
// simply finishes with no insertion position found.)
if (posidx >= endposidx) {
#ifdef TF_ENABLE_KICK
// Temporary variables.
bool replace = false; // flag: whether an eviction position has been found
posidx = endposidx - 1; // templates near the end of the bucket are the less
// frequently used ones, so start from the last slot.
int stopposidx = // index at which the search stops; to avoid
(startposidx + endposidx) / 2; // unreasonable evictions, templates in the
// front half of the bucket are never evicted.
// Search backwards for a template that is currently unused and evict it. A
// do-while loop is used so that at least the last entry is examined; this
// prevents the search from concluding that no suitable entry exists without
// looking at any entry when the bucket size is very small.
do {
// Only templates that are currently unused are considered. If a template
// that is still in use were evicted from the pool, there would be no way to
// decide when to release it: releasing it immediately avoids a memory leak
// but may crash callers that are still reading it, and releasing it later
// may crash them just as well. Restricting eviction to unused templates lets
// us free them safely without risking either a memory leak or a crash.
if (countPool[shape][posidx] == 0) {
// Set the flag and delete the template to free its memory.
replace = true;
// Delete the template to avoid a memory leak. The pool slot is about to be
// overwritten, so the statement resetting the pool entry to NULL is
// commented out to avoid unnecessary work; the same holds for the
// commented-out reset of the private parameter below.
TemplateBasicOp::deleteTemplate(tplPool[shape][posidx]);
//tplPool[shape][posidx] = NULL;
// The private parameter stored in the pool must also be released here,
// because that space was allocated internally when the template was added to
// the pool.
_templateVendorArray[shape]->deletePrivated(
privatePool[shape][posidx]);
//privatePool[shape][posidx] = NULL;
// A suitable position has been found; leave the loop.
break;
}
} while (posidx-- >= stopposidx);
// If no eviction position was found, set the index to a dummy value.
if (!replace)
posidx = -1;
#else
// With the eviction feature disabled, simply set the index to a dummy value.
posidx = -1;
#endif
}
// Store the template and its parameters in the pool. posidx must first be
// checked against the dummy value: even when no suitable pool position is
// found, a template is still created and handed to the caller; such a
// template is destroyed in putTemplate, which will notice that it is not
// present in the pool.
if (posidx >= startposidx && posidx < endposidx) {
tplPool[shape][posidx] = *tpl;
sizePool[shape][posidx] = size;
// copyPrivated is called here because the data behind the user-supplied
// privated pointer is not stable: it may be modified or freed, which would
// make the pool unstable as well. To guard against this, a private copy is
// made for exclusive use inside the pool.
privatePool[shape][posidx] =
_templateVendorArray[shape]->copyPrivated(privated);
countPool[shape][posidx] = 1;
}
// Done; return.
return NO_ERROR;
}
// Host static method: putTemplate (return a template obtained through getTemplate)
__host__ int TemplateFactory::putTemplate(Template *tpl)
{
// If the given template is NULL, return directly.
if (tpl == NULL)
return NULL_POINTER;
// Look for the template in the template pool; if it is found there, adjust
// its usage counter.
for (int shape = 0; shape < TF_CNT_SHAPE; shape++) {
for (int i = 0; i < TF_VOL_SHAPE * TF_SET_SHAPE; i++) {
// The template was found in the pool; decrement its usage counter.
if (tplPool[shape][i] == tpl) {
if (countPool[shape][i] > 0)
countPool[shape][i]--;
// Done; return.
return NO_ERROR;
}
}
}
// The template was not found in the pool, so release it directly.
TemplateBasicOp::deleteTemplate(tpl);
// Done; return.
return NO_ERROR;
}
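// Hedged usage sketch (not part of the original source): how a caller might
// obtain and return a Gaussian template. The shape code 3 is an assumption
// taken from the position of the Gaussian vendor in _templateVendorArray
// above; the real shape constants are defined in the TemplateFactory header.
//
//     float sigma = 1.5f;                      // private parameter (sigma)
//     Template *gausstpl = NULL;
//     // Request a 5 x 5 Gaussian template (created and pooled on a miss).
//     int err = TemplateFactory::getTemplate(&gausstpl, 3 /* Gaussian */,
//                                            dim3(5, 5, 1), &sigma);
//     if (err == NO_ERROR && gausstpl != NULL) {
//         // ... use gausstpl ...
//         // Hand it back so the pool usage counter is decremented, or so the
//         // template is freed if it never made it into the pool.
//         TemplateFactory::putTemplate(gausstpl);
//     }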
#include "nvblox/core/accessors.h"
#include "nvblox/core/common_names.h"
#include "nvblox/integrators/integrators_common.h"
#include "nvblox/mesh/impl/marching_cubes_table.h"
#include "nvblox/mesh/marching_cubes.h"
#include "nvblox/mesh/mesh_integrator.h"
#include "nvblox/utils/timing.h"
namespace nvblox {
void MeshIntegrator::colorMesh(const ColorLayer& color_layer,
BlockLayer<MeshBlock>* mesh_layer) {
colorMesh(color_layer, mesh_layer->getAllBlockIndices(), mesh_layer);
}
void MeshIntegrator::colorMesh(const ColorLayer& color_layer,
const std::vector<Index3D>& block_indices,
BlockLayer<MeshBlock>* mesh_layer) {
// Default choice is GPU
colorMeshGPU(color_layer, block_indices, mesh_layer);
}
//
/* Color Mesh blocks on the GPU
*
* Call with
* - one ThreadBlock per VoxelBlock, GridDim 1D
* - BlockDim 1D, any size: we implement a strided access pattern over
* MeshBlock vertices
*
* @param: color_blocks: a list of color blocks which correspond in position
* to mesh_blocks
* @param: block_indices: a list of blocks indices.
* @param: cuda_mesh_blocks: a list of mesh_blocks to be colored.
*/
__global__ void colorMeshBlockByClosestColorVoxel(
const ColorBlock** color_blocks, const Index3D* block_indices,
const float block_size, const float voxel_size,
CudaMeshBlock* cuda_mesh_blocks) {
// Block
const ColorBlock* color_block_ptr = color_blocks[blockIdx.x];
const Index3D block_index = block_indices[blockIdx.x];
CudaMeshBlock cuda_mesh_block = cuda_mesh_blocks[blockIdx.x];
// The position of this block in the layer
const Vector3f p_L_B_m = getPositionFromBlockIndex(block_size, block_index);
// Iterate through MeshBlock vertices - strided access pattern
for (int i = threadIdx.x; i < cuda_mesh_block.size; i += blockDim.x) {
// The position of this vertex in the layer
const Vector3f p_L_V_m = cuda_mesh_block.vertices[i];
// The position of this vertex in the block
const Vector3f p_B_V_m = p_L_V_m - p_L_B_m;
// Convert this to a voxel index
Index3D voxel_idx_in_block = (p_B_V_m.array() / voxel_size).cast<int>();
// NOTE(alexmillane): Here we make some assumptions.
// - We assume that the closest voxel to p_L_V is in the ColorBlock
// co-located with the MeshBlock from which p_L_V was drawn.
// - This will (very?) occasionally be incorrect when mesh vertices
// escape block boundaries. However, making this assumption saves us any
// neighbour calculations.
constexpr size_t KVoxelsPerSizeMinusOne =
VoxelBlock<ColorVoxel>::kVoxelsPerSide - 1;
voxel_idx_in_block =
voxel_idx_in_block.array().min(KVoxelsPerSizeMinusOne).max(0);
// Get the color voxel
const ColorVoxel color_voxel =
color_block_ptr->voxels[voxel_idx_in_block.x()] // NOLINT
[voxel_idx_in_block.y()] // NOLINT
[voxel_idx_in_block.z()];
// Write the color out to global memory
cuda_mesh_block.colors[i] = color_voxel.color;
}
}
__global__ void colorMeshBlocksConstant(Color color,
CudaMeshBlock* cuda_mesh_blocks) {
// Each threadBlock operates on a single MeshBlock
CudaMeshBlock cuda_mesh_block = cuda_mesh_blocks[blockIdx.x];
// Iterate through MeshBlock vertices - strided access pattern
for (int i = threadIdx.x; i < cuda_mesh_block.size; i += blockDim.x) {
cuda_mesh_block.colors[i] = color;
}
}
void colorMeshBlocksConstantGPU(const std::vector<Index3D>& block_indices,
const Color& color, MeshLayer* mesh_layer,
cudaStream_t cuda_stream) {
CHECK_NOTNULL(mesh_layer);
if (block_indices.size() == 0) {
return;
}
// Prepare CudaMeshBlocks, which are effectively containers of device pointers
std::vector<CudaMeshBlock> cuda_mesh_blocks;
cuda_mesh_blocks.resize(block_indices.size());
for (int i = 0; i < block_indices.size(); i++) {
cuda_mesh_blocks[i] =
CudaMeshBlock(mesh_layer->getBlockAtIndex(block_indices[i]).get());
}
// Allocate
CudaMeshBlock* cuda_mesh_block_device_ptrs;
checkCudaErrors(cudaMalloc(&cuda_mesh_block_device_ptrs,
cuda_mesh_blocks.size() * sizeof(CudaMeshBlock)));
// Host -> GPU
checkCudaErrors(
cudaMemcpyAsync(cuda_mesh_block_device_ptrs, cuda_mesh_blocks.data(),
cuda_mesh_blocks.size() * sizeof(CudaMeshBlock),
cudaMemcpyHostToDevice, cuda_stream));
// Kernel call - One ThreadBlock launched per VoxelBlock
constexpr int kThreadsPerBlock = 8 * 32; // Chosen at random
const int num_blocks = block_indices.size();
colorMeshBlocksConstant<<<num_blocks, kThreadsPerBlock, 0, cuda_stream>>>(
Color::Gray(), // NOLINT
cuda_mesh_block_device_ptrs);
checkCudaErrors(cudaStreamSynchronize(cuda_stream));
checkCudaErrors(cudaPeekAtLastError());
// Deallocate
checkCudaErrors(cudaFree(cuda_mesh_block_device_ptrs));
}
void colorMeshBlockByClosestColorVoxelGPU(
const ColorLayer& color_layer, const std::vector<Index3D>& block_indices,
MeshLayer* mesh_layer, cudaStream_t cuda_stream) {
CHECK_NOTNULL(mesh_layer);
if (block_indices.size() == 0) {
return;
}
// Get the locations (on device) of the color blocks
// NOTE(alexmillane): This function assumes that all block_indices have been
// checked to exist in color_layer.
std::vector<const ColorBlock*> color_blocks =
getBlockPtrsFromIndices(block_indices, color_layer);
// Prepare CudaMeshBlocks, which are effectively containers of device pointers
std::vector<CudaMeshBlock> cuda_mesh_blocks;
cuda_mesh_blocks.resize(block_indices.size());
for (int i = 0; i < block_indices.size(); i++) {
cuda_mesh_blocks[i] =
CudaMeshBlock(mesh_layer->getBlockAtIndex(block_indices[i]).get());
}
// Allocate
const ColorBlock** color_block_device_ptrs;
checkCudaErrors(cudaMalloc(&color_block_device_ptrs,
color_blocks.size() * sizeof(ColorBlock*)));
Index3D* block_indices_device_ptr;
checkCudaErrors(cudaMalloc(&block_indices_device_ptr,
block_indices.size() * sizeof(Index3D)));
CudaMeshBlock* cuda_mesh_block_device_ptrs;
checkCudaErrors(cudaMalloc(&cuda_mesh_block_device_ptrs,
cuda_mesh_blocks.size() * sizeof(CudaMeshBlock)));
// Host -> GPU transfers
checkCudaErrors(cudaMemcpyAsync(color_block_device_ptrs, color_blocks.data(),
color_blocks.size() * sizeof(ColorBlock*),
cudaMemcpyHostToDevice, cuda_stream));
checkCudaErrors(cudaMemcpyAsync(block_indices_device_ptr,
block_indices.data(),
block_indices.size() * sizeof(Index3D),
cudaMemcpyHostToDevice, cuda_stream));
checkCudaErrors(
cudaMemcpyAsync(cuda_mesh_block_device_ptrs, cuda_mesh_blocks.data(),
cuda_mesh_blocks.size() * sizeof(CudaMeshBlock),
cudaMemcpyHostToDevice, cuda_stream));
// Kernel call - One ThreadBlock launched per VoxelBlock
constexpr int kThreadsPerBlock = 8 * 32; // Chosen at random
const int num_blocks = block_indices.size();
const float voxel_size =
mesh_layer->block_size() / VoxelBlock<TsdfVoxel>::kVoxelsPerSide;
colorMeshBlockByClosestColorVoxel<<<num_blocks, kThreadsPerBlock, 0,
cuda_stream>>>(
color_block_device_ptrs, // NOLINT
block_indices_device_ptr, // NOLINT
mesh_layer->block_size(), // NOLINT
voxel_size, // NOLINT
cuda_mesh_block_device_ptrs);
checkCudaErrors(cudaStreamSynchronize(cuda_stream));
checkCudaErrors(cudaPeekAtLastError());
// Deallocate
checkCudaErrors(cudaFree(color_block_device_ptrs));
checkCudaErrors(cudaFree(block_indices_device_ptr));
checkCudaErrors(cudaFree(cuda_mesh_block_device_ptrs));
}
void MeshIntegrator::colorMeshGPU(const ColorLayer& color_layer,
MeshLayer* mesh_layer) {
colorMeshGPU(color_layer, mesh_layer->getAllBlockIndices(), mesh_layer);
}
void MeshIntegrator::colorMeshGPU(
const ColorLayer& color_layer,
const std::vector<Index3D>& requested_block_indices,
MeshLayer* mesh_layer) {
CHECK_NOTNULL(mesh_layer);
CHECK_EQ(color_layer.block_size(), mesh_layer->block_size());
// NOTE(alexmillane): Generally, some of the MeshBlocks which we are
// "coloring" will not have data in the color layer. HOWEVER, for colored
// MeshBlocks (ie with non-empty color members), the size of the colors must
// match vertices. Therefore we "color" all requested block_indices in two
// parts:
// - The first part using the color layer, and
// - the second part a constant color.
// Check for each index, that the MeshBlock exists, and if it does
// allocate space for color.
std::vector<Index3D> block_indices;
block_indices.reserve(requested_block_indices.size());
std::for_each(
requested_block_indices.begin(), requested_block_indices.end(),
[&mesh_layer, &block_indices](const Index3D& block_idx) {
if (mesh_layer->isBlockAllocated(block_idx)) {
mesh_layer->getBlockAtIndex(block_idx)->expandColorsToMatchVertices();
block_indices.push_back(block_idx);
}
});
// Split block indices into two groups, one group containing indices with
// corresponding ColorBlocks, and one without.
std::vector<Index3D> block_indices_in_color_layer;
std::vector<Index3D> block_indices_not_in_color_layer;
block_indices_in_color_layer.reserve(block_indices.size());
block_indices_not_in_color_layer.reserve(block_indices.size());
for (const Index3D& block_idx : block_indices) {
if (color_layer.isBlockAllocated(block_idx)) {
block_indices_in_color_layer.push_back(block_idx);
} else {
block_indices_not_in_color_layer.push_back(block_idx);
}
}
// Color
colorMeshBlockByClosestColorVoxelGPU(
color_layer, block_indices_in_color_layer, mesh_layer, cuda_stream_);
colorMeshBlocksConstantGPU(block_indices_not_in_color_layer,
default_mesh_color_, mesh_layer, cuda_stream_);
}
void MeshIntegrator::colorMeshCPU(const ColorLayer& color_layer,
BlockLayer<MeshBlock>* mesh_layer) {
colorMeshCPU(color_layer, mesh_layer->getAllBlockIndices(), mesh_layer);
}
void MeshIntegrator::colorMeshCPU(const ColorLayer& color_layer,
const std::vector<Index3D>& block_indices,
BlockLayer<MeshBlock>* mesh_layer) {
// For each vertex just grab the closest color
for (const Index3D& block_idx : block_indices) {
MeshBlock::Ptr block = mesh_layer->getBlockAtIndex(block_idx);
if (block == nullptr) {
continue;
}
block->colors.resize(block->vertices.size());
for (int i = 0; i < block->vertices.size(); i++) {
const Vector3f& vertex = block->vertices[i];
const ColorVoxel* color_voxel;
if (getVoxelAtPosition<ColorVoxel>(color_layer, vertex, &color_voxel)) {
block->colors[i] = color_voxel->color;
} else {
block->colors[i] = Color::Gray();
}
}
}
}
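/* Hedged usage sketch (not part of the original source). It shows how a
 * caller might color an already-integrated mesh; the default-constructed
 * MeshIntegrator is an assumption made for illustration only.
 *
 *   void colorExample(const nvblox::ColorLayer& color_layer,
 *                     nvblox::BlockLayer<nvblox::MeshBlock>* mesh_layer) {
 *     nvblox::MeshIntegrator mesh_integrator;
 *     // Colors every allocated MeshBlock: blocks with a corresponding
 *     // ColorBlock get per-vertex colors from the closest color voxel, the
 *     // rest fall back to the integrator's default color.
 *     mesh_integrator.colorMesh(color_layer, mesh_layer);
 *   }
 */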
} // namespace nvblox
#ifdef GPU
#if ( MODEL == HYDRO )
__global__
void CUFLU_dtSolver_HydroCFL( real g_dt_Array[], const real g_Flu_Array[][FLU_NIN_T][ CUBE(PS1) ],
const real g_Mag_Array[][NCOMP_MAG][ PS1P1*SQR(PS1) ],
const real dh, const real Safety, const real MinPres, const EoS_t EoS );
#ifdef GRAVITY
__global__
void CUPOT_dtSolver_HydroGravity( real g_dt_Array[], const real g_Pot_Array[][ CUBE(GRA_NXT) ],
const double g_Corner_Array[][3],
const real dh, const real Safety, const bool P5_Gradient,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const double ExtAcc_Time );
#endif
#elif ( MODEL == ELBDM )
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
// device pointers
extern real *d_dt_Array_T;
extern real (*d_Flu_Array_T)[FLU_NIN_T][ CUBE(PS1) ];
#ifdef GRAVITY
extern real (*d_Pot_Array_T)[ CUBE(GRA_NXT) ];
extern double (*d_Corner_Array_PGT)[3];
#endif
#ifdef MHD
extern real (*d_Mag_Array_T)[NCOMP_MAG][ PS1P1*SQR(PS1) ];
#else
static real (*d_Mag_Array_T)[NCOMP_MAG][ PS1P1*SQR(PS1) ] = NULL;
#endif
extern cudaStream_t *Stream;
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_Asyn_dtSolver
// Description : Invoke various dt solvers
//
// ***********************************************************
// ** Asynchronous Function **
// ** **
// ** will return before the execution in GPU is complete **
// ***********************************************************
//
// Note : 1. Use streams for the asynchronous memory copy between device and host
// 2. Prefix "d" : for pointers pointing to the "Device" memory space
// Prefix "h" : for pointers pointing to the "Host" memory space
//
// Parameter : TSolver : Target dt solver
// --> DT_FLU_SOLVER : dt solver for fluid
// DT_GRA_SOLVER : dt solver for gravity
// h_dt_Array : Host array to store the minimum dt in each target patch
// h_Flu_Array : Host array storing the prepared fluid data of each target patch
// h_Mag_Array : Host array storing the prepared B field data of each target patch
// h_Pot_Array : Host array storing the prepared potential data of each target patch
// h_Corner_Array : Array storing the physical corner coordinates of each patch
// NPatchGroup : Number of patch groups evaluated simultaneously by GPU
// dh : Grid size
// Safety : dt safety factor
// MinPres : Minimum allowed pressure
// P5_Gradient : Use 5-points stencil to evaluate the potential gradient
// UsePot : Add self-gravity and/or external potential
// ExtAcc : Add external acceleration
// TargetTime : Target physical time
// GPU_NStream : Number of CUDA streams for the asynchronous memory copy
//
// Return : h_dt_Array
//-------------------------------------------------------------------------------------------------------
void CUAPI_Asyn_dtSolver( const Solver_t TSolver, real h_dt_Array[], const real h_Flu_Array[][FLU_NIN_T][ CUBE(PS1) ],
const real h_Mag_Array[][NCOMP_MAG][ PS1P1*SQR(PS1) ], const real h_Pot_Array[][ CUBE(GRA_NXT) ],
const double h_Corner_Array[][3], const int NPatchGroup, const real dh, const real Safety,
const real MinPres, const bool P5_Gradient, const bool UsePot, const OptExtAcc_t ExtAcc,
const double TargetTime, const int GPU_NStream )
{
// check
# ifdef GAMER_DEBUG
if ( TSolver != DT_FLU_SOLVER )
# ifdef GRAVITY
if ( TSolver != DT_GRA_SOLVER )
# endif
Aux_Error( ERROR_INFO, "TSolver != DT_FLU_SOLVER / DT_GRA_SOLVER !!\n" );
if ( h_dt_Array == NULL )
Aux_Error( ERROR_INFO, "h_dt_Array == NULL !!\n" );
if ( TSolver == DT_FLU_SOLVER && h_Flu_Array == NULL )
Aux_Error( ERROR_INFO, "h_Flu_Array == NULL !!\n" );
# ifdef GRAVITY
if ( TSolver == DT_GRA_SOLVER )
{
if ( UsePot && h_Pot_Array == NULL )
Aux_Error( ERROR_INFO, "h_Pot_Array == NULL !!\n" );
if ( ExtAcc )
{
if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" );
if ( d_Corner_Array_PGT == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_PGT == NULL !!\n" );
}
}
# endif
# ifdef MHD
if ( TSolver == DT_FLU_SOLVER && h_Mag_Array == NULL )
Aux_Error( ERROR_INFO, "h_Mag_Array == NULL !!\n" );
# endif
# endif // #ifdef GAMER_DEBUG
// set the block size
const int NPatch = NPatchGroup*8;
dim3 BlockDim_dtSolver( 1, 1, 1 );
switch ( TSolver )
{
case DT_FLU_SOLVER:
BlockDim_dtSolver.x = DT_FLU_BLOCK_SIZE;
break;
# ifdef GRAVITY
case DT_GRA_SOLVER:
BlockDim_dtSolver.x = DT_GRA_BLOCK_SIZE;
break;
# endif
default :
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "TSolver", TSolver );
}
// set the number of patches and the corresponding data size to be transferred into GPU in each stream
int *NPatch_per_Stream = new int [GPU_NStream];
int *UsedPatch = new int [GPU_NStream];
int *dt_MemSize = new int [GPU_NStream];
int *Corner_MemSize = new int [GPU_NStream];
int *Flu_MemSize = ( TSolver == DT_FLU_SOLVER ) ? new int [GPU_NStream] : NULL;
# ifdef MHD
int *Mag_MemSize = ( TSolver == DT_FLU_SOLVER ) ? new int [GPU_NStream] : NULL;
# endif
# ifdef GRAVITY
int *Pot_MemSize = ( TSolver == DT_GRA_SOLVER ) ? new int [GPU_NStream] : NULL;
# endif
// number of patches in each stream
UsedPatch[0] = 0;
if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatch;
else
{
for (int s=0; s<GPU_NStream-1; s++)
{
NPatch_per_Stream[s] = NPatch / GPU_NStream;
UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s];
}
NPatch_per_Stream[GPU_NStream-1] = NPatch - UsedPatch[GPU_NStream-1];
}
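// Illustrative example (not in the original source): with NPatch == 100 and
// GPU_NStream == 3, the loop above yields NPatch_per_Stream = {33, 33, 34}
// and UsedPatch = {0, 33, 66}, so the last stream absorbs the remainder.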
// corresponding data size to be transferred into GPU in each stream
for (int s=0; s<GPU_NStream; s++)
{
switch ( TSolver )
{
case DT_FLU_SOLVER:
Flu_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*CUBE(PS1)*FLU_NIN_T;
# ifdef MHD
Mag_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*PS1P1*SQR(PS1)*NCOMP_MAG;
# endif
break;
# ifdef GRAVITY
case DT_GRA_SOLVER:
Pot_MemSize [s] = sizeof(real )*NPatch_per_Stream[s]*CUBE(GRA_NXT);
Corner_MemSize[s] = sizeof(double)*NPatch_per_Stream[s]*3;
break;
# endif
default :
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "TSolver", TSolver );
}
dt_MemSize[s] = sizeof(real)*NPatch_per_Stream[s];
}
// a. copy data from host to device
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
switch ( TSolver )
{
case DT_FLU_SOLVER:
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_T + UsedPatch[s], h_Flu_Array + UsedPatch[s],
Flu_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
# ifdef MHD
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Mag_Array_T + UsedPatch[s], h_Mag_Array + UsedPatch[s],
Mag_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
# endif
break;
# ifdef GRAVITY
case DT_GRA_SOLVER:
if ( UsePot )
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_T + UsedPatch[s], h_Pot_Array + UsedPatch[s],
Pot_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
if ( ExtAcc )
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Corner_Array_PGT + UsedPatch[s], h_Corner_Array + UsedPatch[s],
Corner_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
break;
# endif
default :
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "TSolver", TSolver );
}
} // for (int s=0; s<GPU_NStream; s++)
// b. execute the kernel
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
# if ( MODEL == HYDRO )
switch ( TSolver )
{
case DT_FLU_SOLVER:
CUFLU_dtSolver_HydroCFL <<< NPatch_per_Stream[s], BlockDim_dtSolver, 0, Stream[s] >>>
( d_dt_Array_T + UsedPatch[s],
d_Flu_Array_T + UsedPatch[s],
d_Mag_Array_T + UsedPatch[s],
dh, Safety, MinPres, EoS );
break;
# ifdef GRAVITY
case DT_GRA_SOLVER:
CUPOT_dtSolver_HydroGravity <<< NPatch_per_Stream[s], BlockDim_dtSolver, 0, Stream[s] >>>
( d_dt_Array_T + UsedPatch[s],
d_Pot_Array_T + UsedPatch[s],
d_Corner_Array_PGT + UsedPatch[s],
dh, Safety, P5_Gradient, UsePot, ExtAcc, GPUExtAcc_Ptr, TargetTime );
break;
# endif
default :
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "TSolver", TSolver );
}
# elif ( MODEL == ELBDM )
# else
# error : unsupported MODEL !!
# endif // MODEL
CUDA_CHECK_ERROR( cudaGetLastError() );
} // for (int s=0; s<GPU_NStream; s++)
// c. copy data from device to host
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_dt_Array + UsedPatch[s], d_dt_Array_T + UsedPatch[s],
dt_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) );
} // for (int s=0; s<GPU_NStream; s++)
delete [] NPatch_per_Stream;
delete [] UsedPatch;
delete [] dt_MemSize;
delete [] Corner_MemSize;
delete [] Flu_MemSize;
# ifdef MHD
delete [] Mag_MemSize;
# endif
# ifdef GRAVITY
delete [] Pot_MemSize;
# endif
} // FUNCTION : CUAPI_Asyn_dtSolver
#endif // #ifdef GPU
* \file
* cub::DeviceRunLengthEncode provides device-wide, parallel operations for computing a run-length encoding across a sequence of data items residing within device-accessible memory.
*/
#pragma once
#include <stdio.h>
#include <iterator>
#include "../config.cuh"
#include "dispatch/dispatch_rle.cuh"
#include "dispatch/dispatch_reduce_by_key.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \brief DeviceRunLengthEncode provides device-wide, parallel operations for demarcating "runs" of same-valued items within a sequence residing within device-accessible memory.
* \ingroup SingleModule
*
* \par Overview
* A <a href="http://en.wikipedia.org/wiki/Run-length_encoding"><em>run-length encoding</em></a>
* computes a simple compressed representation of a sequence of input elements such that each
* maximal "run" of consecutive same-valued data items is encoded as a single data value along with a
* count of the elements in that run.
*
* \par Usage Considerations
* \cdp_class{DeviceRunLengthEncode}
*
* \par Performance
* \linear_performance{run-length encode}
*
* \par
* The following chart illustrates DeviceRunLengthEncode::RunLengthEncode performance across
* different CUDA architectures for \p int32 items.
* Segments have lengths uniformly sampled from [1,1000].
*
* \image html rle_int32_len_500.png
*
* \par
* \plots_below
*
*/
struct DeviceRunLengthEncode
{
/**
* \brief Computes a run-length encoding of the sequence \p d_in.
*
* \par
* - For the <em>i</em><sup>th</sup> run encountered, the first key of the run and its length are written to
* <tt>d_unique_out[<em>i</em>]</tt> and <tt>d_counts_out[<em>i</em>]</tt>,
* respectively.
* - The total number of runs encountered is written to \p d_num_runs_out.
* - The <tt>==</tt> equality operator is used to determine whether values are equivalent
* - \devicestorage
*
* \par Performance
* The following charts illustrate saturated encode performance across different
* CUDA architectures for \p int32 and \p int64 items, respectively. Segments have
* lengths uniformly sampled from [1,1000].
*
* \image html rle_int32_len_500.png
* \image html rle_int64_len_500.png
*
* \par
* The following charts are similar, but with segment lengths uniformly sampled from [1,10]:
*
* \image html rle_int32_len_5.png
* \image html rle_int64_len_5.png
*
* \par Snippet
* The code snippet below illustrates the run-length encoding of a sequence of \p int values.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_run_length_encode.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input and output
* int num_items; // e.g., 8
* int *d_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8]
* int *d_unique_out; // e.g., [ , , , , , , , ]
* int *d_counts_out; // e.g., [ , , , , , , , ]
* int *d_num_runs_out; // e.g., [ ]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run encoding
* cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items);
*
* // d_unique_out <-- [0, 2, 9, 5, 8]
* // d_counts_out <-- [1, 2, 1, 3, 1]
* // d_num_runs_out <-- [5]
*
* \endcode
*
* \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator
* \tparam UniqueOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing unique output items \iterator
* \tparam LengthsOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing output counts \iterator
* \tparam NumRunsOutputIteratorT <b>[inferred]</b> Output iterator type for recording the number of runs encountered \iterator
*/
template <
typename InputIteratorT,
typename UniqueOutputIteratorT,
typename LengthsOutputIteratorT,
typename NumRunsOutputIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t Encode(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
InputIteratorT d_in, ///< [in] Pointer to the input sequence of keys
UniqueOutputIteratorT d_unique_out, ///< [out] Pointer to the output sequence of unique keys (one key per run)
LengthsOutputIteratorT d_counts_out, ///< [out] Pointer to the output sequence of run-lengths (one count per run)
NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs
int num_items, ///< [in] Total number of input items (i.e., the length of \p d_in)
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
typedef int OffsetT; // Signed integer type for global offsets
typedef NullType* FlagIterator; // FlagT iterator type (not used)
typedef NullType SelectOp; // Selection op (not used)
typedef Equality EqualityOp; // Default == operator
typedef cub::Sum ReductionOp; // Value reduction operator
// The lengths output value type
typedef typename If<(Equals<typename std::iterator_traits<LengthsOutputIteratorT>::value_type, void>::VALUE), // LengthT = (if output iterator's value type is void) ?
OffsetT, // ... then the OffsetT type,
typename std::iterator_traits<LengthsOutputIteratorT>::value_type>::Type LengthT; // ... else the output iterator's value type
// Generator type for providing 1s values for run-length reduction
typedef ConstantInputIterator<LengthT, OffsetT> LengthsInputIteratorT;
return DispatchReduceByKey<InputIteratorT, UniqueOutputIteratorT, LengthsInputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT, EqualityOp, ReductionOp, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_in,
d_unique_out,
LengthsInputIteratorT((LengthT) 1),
d_counts_out,
d_num_runs_out,
EqualityOp(),
ReductionOp(),
num_items,
stream,
debug_synchronous);
}
/**
* \brief Enumerates the starting offsets and lengths of all non-trivial runs (of length > 1) of same-valued keys in the sequence \p d_in.
*
* \par
* - For the <em>i</em><sup>th</sup> non-trivial run, the run's starting offset
* and its length are written to <tt>d_offsets_out[<em>i</em>]</tt> and
* <tt>d_lengths_out[<em>i</em>]</tt>, respectively.
* - The total number of runs encountered is written to \p d_num_runs_out.
* - The <tt>==</tt> equality operator is used to determine whether values are equivalent
* - \devicestorage
*
* \par Performance
*
* \par Snippet
* The code snippet below illustrates the identification of non-trivial runs within a sequence of \p int values.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/device/device_run_length_encode.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for input and output
* int num_items; // e.g., 8
* int *d_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8]
* int *d_offsets_out; // e.g., [ , , , , , , , ]
* int *d_lengths_out; // e.g., [ , , , , , , , ]
* int *d_num_runs_out; // e.g., [ ]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run encoding
* cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items);
*
* // d_offsets_out <-- [1, 4]
* // d_lengths_out <-- [2, 3]
* // d_num_runs_out <-- [2]
*
* \endcode
*
* \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator
* \tparam OffsetsOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing run-offset values \iterator
* \tparam LengthsOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing run-length values \iterator
* \tparam NumRunsOutputIteratorT <b>[inferred]</b> Output iterator type for recording the number of runs encountered \iterator
*/
template <
typename InputIteratorT,
typename OffsetsOutputIteratorT,
typename LengthsOutputIteratorT,
typename NumRunsOutputIteratorT>
CUB_RUNTIME_FUNCTION __forceinline__
static cudaError_t NonTrivialRuns(
void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
InputIteratorT d_in, ///< [in] Pointer to input sequence of data items
OffsetsOutputIteratorT d_offsets_out, ///< [out] Pointer to output sequence of run-offsets (one offset per non-trivial run)
LengthsOutputIteratorT d_lengths_out, ///< [out] Pointer to output sequence of run-lengths (one count per non-trivial run)
NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs (i.e., length of \p d_offsets_out)
int num_items, ///< [in] Total number of input items (i.e., the length of \p d_in)
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>.
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false.
{
typedef int OffsetT; // Signed integer type for global offsets
typedef Equality EqualityOp; // Default == operator
return DeviceRleDispatch<InputIteratorT, OffsetsOutputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT, EqualityOp, OffsetT>::Dispatch(
d_temp_storage,
temp_storage_bytes,
d_in,
d_offsets_out,
d_lengths_out,
d_num_runs_out,
EqualityOp(),
num_items,
stream,
debug_synchronous);
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
* COMPILATION TIP
* g++ main.cpp ./structs/structs.cpp -o main
*
* */
#include "./metropolis.h"
/* =============== boundary conditions =============== */
/**
* @fn periodic_nn
* @brief periodic boundary conditions; Choose correct matrix index with
* periodic boundary conditions
*
* Input :
* @param - i : Base index
* @param - L : Highest \"legal\" index
* @param - nu : Number to add or subtract from i
*/
__device__ int periodic_nn(const int i, const int L, const int nu) {
return ( i + nu ) % L; // (i + nu) = 0,1,...L-1
}
/**
* @fn periodic
* @brief periodic boundary conditions; Choose correct matrix index with
* periodic boundary conditions
*
* Input :
* @param - i : Base index
* @param - L : Highest \"legal\" index
*/
__device__ int periodic(const int i, const int L) {
return i % L; // i = 0,1,...L-1
}
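// Illustrative check (not in the original source): periodic(5, 4) == 1 and
// periodic_nn(3, 4, 1) == 0, i.e. stepping right off the edge wraps back to
// the first site. Note that the plain % operator only wraps correctly for
// non-negative sums; a left/down neighbour of site 0 (nu == -1) would need
// the usual (i + nu + L) % L form instead.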
/* =============== END of boundary conditions =============== */
/* =============== Initialization =============== */
/** @fn init_allup_partialsumM
* @brief initialize spins all up and calculate partial sums for magnetization M
* @details 1st part of initialize_allup_kernel, 2nd. part is block_sumM
* */
__device__ int init_allup_partialsumM(int* Sptr,size_t Lx,size_t Ly) {
int sum=0; // partial sum of the magnetization M
// global thread index, k_x = 0,1,...N_x*M_x, k_y = 0,1,...N_y*M_y
unsigned int k_x = threadIdx.x + blockDim.x * blockIdx.x ;
unsigned int k_y = threadIdx.y + blockDim.y * blockIdx.y ;
unsigned int k = k_x + gridDim.x * blockDim.x * k_y;
for (unsigned int idx = k; idx < Lx*Ly/4; idx+= blockDim.x * gridDim.x * blockDim.y * gridDim.y ) {
reinterpret_cast<int4*>(Sptr)[idx] = {1,1,1,1} ;
int4 s4 = ((int4*) Sptr)[idx];
sum += s4.x + s4.y + s4.z + s4.w;
}
// process remaining elements
for (unsigned int idx = k + Lx*Ly/4 *4; idx < Lx*Ly; idx += 4) {
Sptr[idx] = 1;
sum += Sptr[idx];
}
return sum;
}
/** @fn block_sumM
* @brief reduce sum on thread block of partial sums of spins for magnetization M
* @details 2nd. part of initialize_allup_kernel, 1st. part is init_allup_partialsumM
* */
__device__ int block_sumM(cg::thread_group tg, int* temp, int sumresult) {
unsigned int lane = tg.thread_rank();
// Each iteration halves number of active threads
// Each thread adds to partial sum[i] its sum[lane+i]
for (unsigned int idx = tg.size()/2; idx >0; idx/=2)
{
// load the array values with this thread block into temp
temp[lane] = sumresult;
tg.sync(); // wait for all threads to store into temp
if (lane<idx) {
sumresult += temp[lane+idx];
}
tg.sync(); // wait for all threads to load
}
return sumresult; // note: only thread 0 will return full sum
}
/** @fn calcE
* @brief computes E, a summation of all unique nearest neighbor pairs of spins
* @details do the summation in shared memory, which includes halo cells of width 1 "to the right".
* lx+idxx*gridDim.x*blockDim.x, idxx=0,1,.. to how many multiples of gridDim.x*blockDim.x for
* multiples of thread grids to "cover" our lattice grid of spins.
* (lx+idxx*gridDim.x*blockDim.x)%Lx because we want periodic boundary conditions
* I try to future proof this by using inline function periodic
* */
__device__ int calcE(cg::thread_group & tg, int* Sptr, int* temp, size_t Lx, size_t Ly, const float J) {
int resultE =0;
const int RAD = 1; // "radius" of "halo" cells, of width 1 (in this case)
// old way of thread, block indexing
unsigned int k_x = threadIdx.x + blockDim.x * blockIdx.x ;
unsigned int k_y = threadIdx.y + blockDim.y * blockIdx.y ;
unsigned int kidx = k_x + k_y * gridDim.x * blockDim.x ;
unsigned int S_x = static_cast<int>(blockDim.x + RAD);
unsigned int S_y = static_cast<int>(blockDim.y + RAD);
unsigned int s_x = threadIdx.x + RAD; // s_x = 1,2,...S_x-1
unsigned int s_y = threadIdx.y + RAD; // s_y = 1,2,...S_y-1
// use these loops to account for elements not "covered" by the threads in grid that's launched
for (unsigned int l_y=k_y,idxy=0; l_y < Ly; idxy++, l_y += blockDim.y *gridDim.y) {
for (unsigned int l_x=k_x, idxx=0; l_x < Lx; idxx++, l_x += blockDim.x*gridDim.x ) {
int lx =0; // lx gives back global index on lattice grid of spins
int ly =0; // ly gives back global index on lattice grid of spins
/* 0, M_x
* 1
* ...
* M_x-1
* */
for (int i = threadIdx.x; i<S_x; i+=static_cast<int>(blockDim.x) ) {
for (int j = threadIdx.y; j <S_y; j+= static_cast<int>(blockDim.y) ) {
lx = i + static_cast<int>(blockDim.x*blockIdx.x);
ly = j + static_cast<int>(blockDim.y*blockIdx.y);
/* lx+idxx*gridDim.x*blockDim.x, idxx=0,1,.. to how many multiples of gridDim.x*blockDim.x for
* multiples of thread grids to "cover" our lattice grid of spins.
* (lx+idxx*gridDim.x*blockDim.x)%Lx because we want periodic boundary conditions
* I try to future proof this by using inline function periodic
* */
temp[i+j*S_x] =
static_cast<float>(
Sptr[ periodic((lx+idxx*gridDim.x*blockDim.x),Lx) +
blockDim.x * gridDim.x * periodic((ly + idxy*gridDim.y*blockDim.y),Ly) ] );
}
}
if ( l_x >= Lx || l_y >= Ly) {
return resultE;
}
tg.sync();
// do the nearest neighbor (unique) pair of spins summation entirely in shared memory
int stencilindex_x = 0; // stencil index in x-direction
int stencilindex_y = 0; // stencil index in y-direction
stencilindex_x = s_x - RAD; // = 0,1,...S_x-2 = (M_x+1)-2 = M_x -1
stencilindex_y = s_y - RAD;
// actual calculation of E
resultE += (-1.f * J) * temp[ stencilindex_x + stencilindex_y * S_x] *
(temp[ stencilindex_x + 1 + stencilindex_y * S_x]
+ temp[ stencilindex_x + (stencilindex_y + 1)*S_x] );
}
} // END of loops to make threads do "double duty" to cover other elements in our spin lattice grid that weren't "covered" by our thread grid
return resultE;
}
__global__ void initialize_allup_kernel(int* Sptr, Sysparam* sysparams, size_t Lx, size_t Ly, const float J) {
// global thread index, k_x = 0,1,...N_x*M_x, k_y = 0,1,...N_y*M_y
// partial sum of spins for magnetization M
int sum4M = init_allup_partialsumM( Sptr, Lx,Ly);
extern __shared__ int temp[];
auto ttb = cg::this_thread_block();
int block_sum = block_sumM(ttb, temp, sum4M) ;
if (ttb.thread_rank() == 0) {
atomicAdd(&(sysparams->M), ((float) block_sum));
}
// int threadsumE = calcE(ttb, Sptr, temp, Lx,Ly,J); // for this thread, here's its partial sum contribution to total energy E
// atomicAdd(&(sysparams->E), ((float) threadsumE) );
}
__global__ void calcE_kernel(int* Sptr, Sysparam* sysparams, size_t Lx, size_t Ly, const float J) {
extern __shared__ int temp[];
auto ttb = cg::this_thread_block();
int threadsumE = calcE(ttb, Sptr, temp, Lx,Ly,J); // for this thread, here's its partial sum contribution to total energy E
atomicAdd(&(sysparams->E), ((float) threadsumE) );
}
/**
* @fn initialize_allup
* @brief "driver" function to initialize energy, spin matrix, and magnetization
* */
void initialize_allup(Spins2d& spins2d, Sysparam_ptr& sysParams,
const std::array<int,3> MAXGRIDSIZES,const dim3 M_is)
{
size_t Lx = spins2d.L_is[0]; // total number of spins of system
size_t Ly = spins2d.L_is[1]; // total number of spins of system
const float J = spins2d.J;
unsigned int RAD = 1; // "radius" or width of "halo" cells needed
/* ========== (thread) grid,block dims ========== */
unsigned long MAX_BLOCKS_y = (MAXGRIDSIZES[1] + M_is.y - 1)/ M_is.y;
// notice that we only launch 1/4 of the Ly threads needed in the y-direction
unsigned int N_y = std::min( MAX_BLOCKS_y, ((Ly/4 + M_is.y - 1)/ M_is.y));
unsigned int N_y_full = std::min( MAX_BLOCKS_y, ((Ly + M_is.y - 1)/ M_is.y));
unsigned long MAX_BLOCKS_x = (MAXGRIDSIZES[0] + M_is.x - 1)/ M_is.x;
// notice that we only launch 1/4 of the Lx threads needed in the x-direction
unsigned int N_x = std::min( MAX_BLOCKS_x, ((Lx/4 + M_is.x - 1)/ M_is.x));
unsigned int N_x_full = std::min( MAX_BLOCKS_x, ((Lx + M_is.x - 1)/ M_is.x));
dim3 N_is { N_x,N_y }; // single (thread) block dims., i.e. number of threads in a single (thread) block
dim3 N_is_full { N_x_full,N_y_full }; // single (thread) block dims., i.e. number of threads in a single (thread) block
int sharedBytes = (M_is.x+RAD)*(M_is.y + RAD)* sizeof(int);
/* ========== END of (thread) grid,block dims ========== */
initialize_allup_kernel<<<N_is,M_is, sharedBytes>>>(spins2d.S.get(),sysParams.d_sysparams.get(),Lx,Ly,J);
calcE_kernel<<<N_is_full,M_is,sharedBytes>>>(spins2d.S.get(),sysParams.d_sysparams.get(),Lx,Ly,J);
} // end of function initialize_allup
__device__ int unifl2intspin(const float unif) {
return (2 * static_cast<int>(floorf(2.f*unif)) - 1);
}
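// Note (not in the original source): curand_uniform() draws from (0, 1], so
// values in (0, 0.5) map to -1 and values in [0.5, 1) map to +1. The single
// point unif == 1.0f would map to +3 rather than a valid spin; in practice
// this is vanishingly rare, and a robust variant could instead use
// (unif < 0.5f ? -1 : 1).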
/** @fn init_rand_partialsumM
* @brief initialize spins randomly and calculate partial sums for magnetization M
* @details 1st part of initialize_rand_kernel, 2nd. part is block_sumM
* */
__device__ int init_rand_partialsumM(int* Sptr,size_t Lx,size_t Ly,curandState *state) {
int sum=0; // partial sum of the magnetization M
// global thread index, k_x = 0,1,...N_x*M_x, k_y = 0,1,...N_y*M_y
unsigned int k_x = threadIdx.x + blockDim.x * blockIdx.x ;
unsigned int k_y = threadIdx.y + blockDim.y * blockIdx.y ;
unsigned int k = k_x + gridDim.x * blockDim.x * k_y;
curandState localState = state[k];
for (unsigned int idx = k; idx < Lx*Ly/4; idx+= blockDim.x * gridDim.x * blockDim.y * gridDim.y ) {
float ranf = curand_uniform(&localState);
int ranint0 = unifl2intspin(ranf);
ranf = curand_uniform(&localState);
int ranint1 = unifl2intspin(ranf);
ranf = curand_uniform(&localState);
int ranint2 = unifl2intspin(ranf);
ranf = curand_uniform(&localState);
int ranint3 = unifl2intspin(ranf);
reinterpret_cast<int4*>(Sptr)[idx] = {ranint0,ranint1,ranint2,ranint3} ;
int4 s4 = ((int4*) Sptr)[idx];
sum += s4.x + s4.y + s4.z + s4.w;
}
// process remaining elements
for (unsigned int idx = k + Lx*Ly/4 *4; idx < Lx*Ly; idx += 4) {
float ranf = curand_uniform(&localState);
int ranint = unifl2intspin(ranf);
Sptr[idx] = ranint;
sum += Sptr[idx];
}
return sum;
}
__global__ void initialize_rand_kernel(int* Sptr, Sysparam* sysparams, size_t Lx, size_t Ly,
curandState *state) {
// global thread index, k_x = 0,1,...N_x*M_x, k_y = 0,1,...N_y*M_y
// partial sum of spins for magnetization M
int sum4M = init_rand_partialsumM( Sptr, Lx,Ly, state);
extern __shared__ int temp[];
auto ttb = cg::this_thread_block();
int block_sum = block_sumM(ttb, temp, sum4M) ;
if (ttb.thread_rank() == 0) {
atomicAdd(&(sysparams->M), ((float) block_sum));
}
}
/**
* @fn initialize_rand
* @brief "driver" function to initialize energy, spin matrix, and magnetization
* */
void initialize_rand(Spins2d& spins2d, Sysparam_ptr& sysParams,
const std::array<int,3> MAXGRIDSIZES,devStatesXOR & devStates,const dim3 M_is)
{
size_t Lx = spins2d.L_is[0]; // total number of spins of system
size_t Ly = spins2d.L_is[1]; // total number of spins of system
const float J = spins2d.J;
unsigned int RAD = 1; // "radius" or width of "halo" cells needed
/* ========== (thread) grid,block dims ========== */
unsigned long MAX_BLOCKS_y = (MAXGRIDSIZES[1] + M_is.y - 1)/ M_is.y;
// notice that we only launch 1/4 of the Ly threads needed in the y-direction
unsigned int N_y = std::min( MAX_BLOCKS_y, ((Ly/4 + M_is.y - 1)/ M_is.y));
unsigned int N_y_full = std::min( MAX_BLOCKS_y, ((Ly + M_is.y - 1)/ M_is.y));
unsigned long MAX_BLOCKS_x = (MAXGRIDSIZES[0] + M_is.x - 1)/ M_is.x;
// notice that we only launch 1/4 of the Lx threads needed in the x-direction
unsigned int N_x = std::min( MAX_BLOCKS_x, ((Lx/4 + M_is.x - 1)/ M_is.x));
unsigned int N_x_full = std::min( MAX_BLOCKS_x, ((Lx + M_is.x - 1)/ M_is.x));
dim3 N_is { N_x,N_y }; // single (thread) block dims., i.e. number of threads in a single (thread) block
dim3 N_is_full { N_x_full,N_y_full }; // single (thread) block dims., i.e. number of threads in a single (thread) block
int sharedBytes = (M_is.x+RAD)*(M_is.y + RAD)* sizeof(int);
/* ========== END of (thread) grid,block dims ========== */
initialize_rand_kernel<<<N_is,M_is, sharedBytes>>>(spins2d.S.get(),sysParams.d_sysparams.get(),Lx,Ly,
devStates.devStates.get());
calcE_kernel<<<N_is_full,M_is,sharedBytes>>>(spins2d.S.get(),sysParams.d_sysparams.get(),Lx,Ly,J);
} // end of function initialize_rand
/* =============== END of initialization =============== */
/* =============== Metropolis algorithm =============== */
__device__ int calcintDeltaE(int* temp, const unsigned int S_x, const unsigned int S_y,
const unsigned int s_x, const unsigned int s_y, const int RAD)
{
int resultDeltaE = 2 * temp[ s_x + s_y * S_x] *
(temp[ s_x + 1 + s_y * S_x]
+ temp[ s_x + (s_y + 1)*S_x]
+ temp[ s_x - 1 + s_y * S_x]
+ temp[ s_x + (s_y - 1)*S_x] );
return resultDeltaE;
}
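// Note (not in the original source): for the 2D Ising model the energy change
// of flipping spin s at a site is
//     Delta E = 2 * J * s * (s_right + s_left + s_up + s_down),
// and calcintDeltaE returns the integer factor 2 * s * (sum of the four
// neighbours); the caller multiplies by J when accumulating the energy (see
// spinflips below).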
__device__ Sysparam spinflips(cg::thread_group & tg, int* Sptr,
int* temp, size_t Lx, size_t Ly, const float J, curandState *state, const float T)
{
Sysparam results_sysparams { 0.f, 0.f, 0.f };
const int RAD = 1; // "radius" of "halo" cells, of width 1 (in this case)
// old way of thread, block indexing
unsigned int k_x = threadIdx.x + blockDim.x * blockIdx.x ;
unsigned int k_y = threadIdx.y + blockDim.y * blockIdx.y ;
unsigned int S_x = static_cast<int>(blockDim.x + 2*RAD);
unsigned int S_y = static_cast<int>(blockDim.y + 2*RAD);
unsigned int s_x = threadIdx.x + RAD; // s_x = 1,2,...S_x-2
unsigned int s_y = threadIdx.y + RAD; // s_y = 1,2,...S_y-2
// use these loops to account for elements not "covered" by the threads in grid that's launched
for (unsigned int l_y=k_y,idxy=0; l_y < Ly; idxy++, l_y += blockDim.y *gridDim.y) {
for (unsigned int l_x=k_x, idxx=0; l_x < Lx; idxx++, l_x += blockDim.x*gridDim.x ) {
int lx =0; // lx gives back global index on lattice grid of spins
int ly =0; // ly gives back global index on lattice grid of spins
/* 0, M_x
* 1
* ...
* M_x-1
* */
for (int i = threadIdx.x; i<S_x; i+=static_cast<int>(blockDim.x) ) {
for (int j = threadIdx.y; j <S_y; j+= static_cast<int>(blockDim.y) ) {
lx = i + static_cast<int>(blockDim.x*blockIdx.x);
ly = j + static_cast<int>(blockDim.y*blockIdx.y);
/* lx+idxx*gridDim.x*blockDim.x, idxx=0,1,.. to how many multiples of gridDim.x*blockDim.x for
* multiples of thread grids to "cover" our lattice grid of spins.
* (lx+idxx*gridDim.x*blockDim.x)%Lx because we want periodic boundary conditions
* I try to future proof this by using inline function periodic
* */
temp[i+j*S_x] =
static_cast<float>(
Sptr[ (lx+idxx*gridDim.x*blockDim.x) % Lx +
blockDim.x * gridDim.x * ( (ly + idxy*gridDim.y*blockDim.y) % Ly ) ] );
}
}
if ( l_x >= Lx || l_y >= Ly) {
return results_sysparams;
}
tg.sync();
// global index k
size_t k = l_x + gridDim.x*blockDim.x * l_y;
/* Copy state to local memory for efficiency */
curandState localState = state[k];
// so-called "checkerboard" - a "checkerboard" pattern is necessitated because the
// change in energy Delta E is dependent upon nearest neighbors, the stencil operation,
// the energy at present time t. This is unlike say the finite difference method approximating
// partial differential equations where we can say the new update is dependent upon values at previous time steps
// if tg.thread_rank() even
// if ( ( tg.thread_rank() % 2) == 0)
if ((( k_x+k_y ) % 2) == 0 )
// if ( (k % 2) == 0 )
{
// pick ALL the "even" spins in this thread block
// do the nearest neighbor (unique) pair of spins summation entirely in shared memory
int intdeltaE = calcintDeltaE(temp, S_x,S_y,s_x,s_y,RAD);
float Wprob = curand_uniform(&localState);
float transprob = expf( -1.f / T * ( static_cast<float>( intdeltaE) ) );
// roll dice, see if we transition or not, given transprob
// if ( curand_uniform(&localState) <= transprob[intdeltaE +8] )
if (intdeltaE <0 || Wprob <= transprob)
{
// instead of loading entire thread block + halo cells to shared memory again, just make single change
// Accept!
temp[ s_x + s_y * S_x] *= -1; // flip 1 spin and accept new spin config
results_sysparams.E += ((float) intdeltaE) * J ;
results_sysparams.M += 2.f*((float) temp[s_x+s_y*S_x]) ;
}
}
tg.sync();
// if tg.thread_rank() odd
// if ( (tg.thread_rank() % 2) == 1)
// if (( k % 2) == 1 )
if (( (k_x + k_y) % 2) == 1 )
{
// do the nearest neighbor (unique) pair of spins summation entirely in shared memory
int intdeltaE = calcintDeltaE(temp, S_x,S_y,s_x,s_y,RAD);
float Wprob = curand_uniform(&localState);
float transprob = expf( -1.f / T * ( static_cast<float>( intdeltaE) ) );
// roll dice, see if we transition or not, given transprob
// if ( curand_uniform(&localState) <= transprob[intdeltaE +8] )
if (intdeltaE <0 || Wprob <= transprob)
{
// Accept!
temp[ s_x + s_y * S_x] *= -1; // flip 1 spin and accept new spin config
results_sysparams.E += ((float) intdeltaE) * J ;
results_sysparams.M += 2.f*((float) temp[s_x+s_y*S_x]) ;
}
}
tg.sync();
// coalesce global memory access with all threads consecutively in memory
Sptr[k] = temp[ s_x + s_y * S_x];
tg.sync(); // Added
}
} // END of loops to make threads do "double duty" to cover other elements in our spin lattice grid that wasn't "covered" by our thread grid
return results_sysparams ;
}
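// Note on the "checkerboard" update in spinflips above: on a 2-dim. lattice every
// site with (k_x + k_y) even has all four nearest neighbors at sites with
// (k_x + k_y) odd, so flipping all "even" spins concurrently never updates two
// interacting spins in the same phase; the "odd" spins are then updated in a
// second phase after tg.sync(). This is why the two branches on ((k_x+k_y) % 2)
// are separated by a synchronization rather than fused into a single pass.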
__global__ void metropolis_kernel(int* Sptr, Sysparam* sysparams, size_t Lx, size_t Ly, const float J,
curandState *state )
{
extern __shared__ int temp[];
auto ttb = cg::this_thread_block();
dim3 ttb_gidx = ttb.group_index();
// unsigned int j = ttb_gidx.x + ttb_gidx.y * gridDim.x;
unsigned int j = blockIdx.x + gridDim.x * blockIdx.y;
// if j is even, 0, 2, ... < N_x*N_y
/* if ((j % 2) ==0)
{
// Sysparam spinflipresults = spinflips(ttb, Sptr, transprob, temp, Lx,Ly,J, state);
Sysparam spinflipresults = spinflips(ttb, Sptr, temp, Lx,Ly,J, state,sysparams->T);
atomicAdd(&(sysparams->E), spinflipresults.E );
atomicAdd(&(sysparams->M), spinflipresults.M );
}
ttb.sync();
if ((j % 2) != 0) {
// Sysparam spinflipresults = spinflips(ttb, Sptr, transprob, temp, Lx,Ly,J, state);
Sysparam spinflipresults = spinflips(ttb, Sptr, temp, Lx,Ly,J, state,sysparams->T);
atomicAdd(&(sysparams->E), spinflipresults.E );
atomicAdd(&(sysparams->M), spinflipresults.M );
}
ttb.sync();
*/
// Sysparam spinflipresults = spinflips(ttb, Sptr, transprob, temp, Lx,Ly,J, state, sysparams->T );
Sysparam spinflipresults = spinflips(ttb, Sptr, temp, Lx,Ly,J, state, sysparams->T );
atomicAdd(&(sysparams->E), spinflipresults.E );
atomicAdd(&(sysparams->M), spinflipresults.M );
}
__global__ void update_avgs(Sysparam* sysparams,Avg* avgs) {
auto ttb = cg::this_thread_block();
dim3 ttb_gidx = ttb.group_index();
if ( ((ttb.thread_rank() == 0) && (ttb_gidx.x == 0)) || ( (threadIdx.x == 0)&&(blockIdx.x==0)) ) {
atomicAdd(&(avgs->Eavg), sysparams->E);
atomicAdd(&(avgs->Mavg), sysparams->M);
atomicAdd(&(avgs->Esq_avg), (sysparams->E)*(sysparams->E));
atomicAdd(&(avgs->Msq_avg), (sysparams->M)*(sysparams->M));
atomicAdd(&(avgs->absM_avg), fabsf(sysparams->M));
atomicAdd(&(avgs->M4_avg), (sysparams->M)*(sysparams->M)*(sysparams->M)*(sysparams->M));
}
}
/**
* @fn metropolis
* @brief "driver" function for Metropolis algorithm, single-spin flip scheme for 2-dim. Ising model
* */
//void metropolis(Spins2d& spins2d, Sysparam_ptr& sysParams,Avg_ptr& averages,TransProb_ptr& transProbs,
void metropolis(Spins2d& spins2d, Sysparam_ptr& sysParams,Avg_ptr& averages,
const std::array<int,3> MAXGRIDSIZES,const dim3 M_is, devStatesXOR & devStates, const unsigned int trials) {
size_t Lx = spins2d.L_is[0]; // number of spins along the x-direction
size_t Ly = spins2d.L_is[1]; // number of spins along the y-direction
const float J = spins2d.J;
unsigned int RAD = 1; // "radius" or width of "halo" cells needed
/* ========== (thread) grid,block dims ========== */
unsigned long MAX_BLOCKS_y = (MAXGRIDSIZES[1] + M_is.y - 1)/ M_is.y;
unsigned int N_y = std::min( MAX_BLOCKS_y, ((Ly + M_is.y - 1)/ M_is.y));
// note: this launches only 1/4 of the threads needed to cover Ly in the y-direction
unsigned int N_y_4th = std::min( MAX_BLOCKS_y, ((Ly/4 + M_is.y - 1)/ M_is.y));
unsigned long MAX_BLOCKS_x = (MAXGRIDSIZES[0] + M_is.x - 1)/ M_is.x;
unsigned int N_x = std::min( MAX_BLOCKS_x, ((Lx + M_is.x - 1)/ M_is.x));
// note: this launches only 1/4 of the threads needed to cover Lx in the x-direction
unsigned int N_x_4th = std::min( MAX_BLOCKS_x, ((Lx/4 + M_is.x - 1)/ M_is.x));
dim3 N_is { N_x,N_y };
dim3 N_is_4th { N_x_4th,N_y_4th }; // (thread) grid dims. using 1/4 of the blocks needed to cover the lattice
int sharedBytes = (M_is.x+2*RAD)*(M_is.y + 2*RAD)* sizeof(int);
/* ========== END of (thread) grid,block dims ========== */
for (unsigned int cycles=1; cycles <= trials; cycles++) {
metropolis_kernel<<< N_is,M_is,sharedBytes>>>( spins2d.S.get(), sysParams.d_sysparams.get(),
Lx,Ly, J,
devStates.devStates.get() );
update_avgs<<<1,1>>>( sysParams.d_sysparams.get(), averages.d_avgs.get() );
}
}
/* =============== END of Metropolis algorithm =============== */
|
the_stack
|
/* classifier parameters */
/************************************
* Notes:
* To parallelize the filter,
* these monolithic arrays may
* need to be split or duplicated
***********************************/
static int *stages_array;
static int *rectangles_array;
static int *weights_array;
static int *alpha1_array;
static int *alpha2_array;
static int *tree_thresh_array;
static int *stages_thresh_array;
static int **scaled_rectangles_array;
int clock_counter = 0;
float n_features = 0;
int iter_counter = 0;
/* compute integral images */
void integralImages( MyImage *src, MyIntImage *sum, MyIntImage *sqsum );
/* scale down the image */
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec);
/* compute scaled image */
void nearestNeighbor (MyImage *src, MyImage *dst);
/* rounding function */
inline int myRound( float value )
{
return (int)(value + (value >= 0 ? 0.5 : -0.5));
}
/*******************************************************
* Function: detectObjects
* Description: It calls all the major steps
******************************************************/
std::vector<MyRect> detectObjects( MyImage* _img, MySize minSize, MySize maxSize, myCascade* cascade,
float scaleFactor, int minNeighbors, int total_nodes)
{
/* group overlapping windows */
const float GROUP_EPS = 0.4f;
/* pointer to input image */
MyImage *img = _img;
/***********************************
* create structs for images
* see haar.h for details
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: square integral image (int)
**********************************/
MyImage image1Obj;
MyIntImage sum1Obj;
MyIntImage sqsum1Obj;
/* pointers for the created structs */
MyImage *img1 = &image1Obj;
MyIntImage *sum1 = &sum1Obj;
MyIntImage *sqsum1 = &sqsum1Obj;
/********************************************************
* allCandidates holds the preliminary face candidates,
* which will be refined later.
*
* std::vector is a sequential container
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Each element of the std::vector is a "MyRect" struct
* MyRect struct keeps the info of a rectangle (see haar.h)
* The rectangle contains one face candidate
*****************************************************/
std::vector<MyRect> allCandidates;
/* scaling factor */
float factor;
/* maxSize */
if( maxSize.height == 0 || maxSize.width == 0 )
{
maxSize.height = img->height;
maxSize.width = img->width;
}
/* window size of the training set */
MySize winSize0 = cascade->orig_window_size;
/* malloc for img1: unsigned char */
createImage(img->width, img->height, img1);
/* malloc for sum1: int */
createSumImage(img->width, img->height, sum1);
/* malloc for sqsum1: int */
createSumImage(img->width, img->height, sqsum1);
/* initial scaling factor */
factor = 1;
#ifdef GPU
int *d_rectangles_array;
cudaMalloc((void**)&d_rectangles_array, sizeof(int)*total_nodes*12);
cudaMemcpy(d_rectangles_array, rectangles_array, sizeof(int)*total_nodes*12, cudaMemcpyHostToDevice);
#endif
/* iterate over the image pyramid */
for( factor = 1; ; factor *= scaleFactor )
{
/* iteration counter */
iter_counter++;
/* size of the image scaled up */
MySize winSize = { myRound(winSize0.width*factor), myRound(winSize0.height*factor) };
/* size of the image scaled down (from bigger to smaller) */
MySize sz = { static_cast<int>( img->width/factor ), static_cast<int>( img->height/factor ) };
/* difference between sizes of the scaled image and the original detection window */
MySize sz1 = { sz.width - winSize0.width, sz.height - winSize0.height };
/* if the actual scaled image is smaller than the original detection window, break */
if( sz1.width < 0 || sz1.height < 0 )
break;
/* if a minSize different from the original detection window is specified, continue to the next scaling */
if( winSize.width < minSize.width || winSize.height < minSize.height )
continue;
/*************************************
* Set the width and height of
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: squared integral image (int)
* see image.c for details
************************************/
setImage(sz.width, sz.height, img1);
setSumImage(sz.width, sz.height, sum1);
setSumImage(sz.width, sz.height, sqsum1);
/***************************************
* Compute-intensive step:
* building image pyramid by downsampling
* downsampling using nearest neighbor
**************************************/
nearestNeighbor(img, img1);
/***************************************************
* Compute-intensive step:
* At each scale of the image pyramid,
* compute a new integral and squared integral image
***************************************************/
integralImages(img1, sum1, sqsum1);
/* sets images for haar classifier cascade */
/**************************************************
* Note:
* Summing pixels within a haar window is done by
* using four corners of the integral image:
* http://en.wikipedia.org/wiki/Summed_area_table
*
* This function loads the four corners,
* but does not do the computation based on the four corners.
* The computation is done next in ScaleImage_Invoker
*************************************************/
setImageForCascadeClassifier(cascade, sum1, sqsum1,
#ifdef GPU
d_rectangles_array,
#endif
total_nodes);
/* print out for each scale of the image pyramid */
printf("detecting faces, iter := %d\n", iter_counter);
/****************************************************
* Process the current scale with the cascaded filter.
* The main computations are invoked by this function.
* Optimization opportunity:
* the same cascade filter is invoked each time
***************************************************/
ScaleImage_Invoker(cascade, factor, sum1->height, sum1->width,
allCandidates);
} /* end of the factor loop, finish all scales in pyramid*/
if( minNeighbors != 0)
{
groupRectangles(allCandidates, minNeighbors, GROUP_EPS);
}
freeImage(img1);
freeSumImage(sum1);
freeSumImage(sqsum1);
#ifdef GPU
cudaFree(d_rectangles_array);
#endif
return allCandidates;
}
/***********************************************
* Note:
* The int_sqrt is a software integer square root.
* The GPU has hardware for floating-point square root (sqrtf).
* In GPU, it is wise to convert an int variable
* into floating point, and use HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
/*****************************************************
* The int_sqrt is only used in runCascadeClassifier
* If you want to replace int_sqrt with HW sqrtf in GPU,
* simply look into the runCascadeClassifier function.
*****************************************************/
unsigned int int_sqrt (unsigned int value)
{
int i;
unsigned int a = 0, b = 0, c = 0;
for (i=0; i < (32 >> 1); i++)
{
c<<= 2;
#define UPPERBITS(value) (value>>30)
c += UPPERBITS(value);
#undef UPPERBITS
value <<= 2;
a <<= 1;
b = (a<<1) | 1;
if (c >= b)
{
c -= b;
a++;
}
}
return a;
}
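#ifdef GPU
/* Hedged sketch, following the note above: on the GPU the software integer square
 * root can be replaced by the hardware floating-point sqrtf. A hypothetical
 * device-side equivalent (not used by the code below; rounding may differ from
 * int_sqrt by one for very large inputs) would be: */
__device__ inline unsigned int int_sqrt_gpu_sketch(unsigned int value)
{
  /* convert to float, use the HW sqrtf, truncate back to an unsigned int */
  return (unsigned int)sqrtf((float)value);
}
#endif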
#ifdef GPU
__global__
void filter_kernel (const int*__restrict d_rectangles_array,
int**__restrict d_scaled_rectangles_array,
int*__restrict data, int width, int total_nodes)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid >= total_nodes) return;
int idx = gid * 12;
for (int k = 0; k < 3; k++)
{
int tr_x = d_rectangles_array[idx + k * 4];
int tr_y = d_rectangles_array[idx + 1 + k * 4];
int tr_width = d_rectangles_array[idx + 2 + k * 4];
int tr_height = d_rectangles_array[idx + 3 + k * 4];
int *p0 = data + width * (tr_y) + (tr_x);
int *p1 = data + width * (tr_y) + (tr_x + tr_width);
int *p2 = data + width * (tr_y + tr_height) + (tr_x);
int *p3 = data + width * (tr_y + tr_height) + (tr_x + tr_width);
if (k < 2)
{
d_scaled_rectangles_array[idx + k * 4] = p0;
d_scaled_rectangles_array[idx + k * 4 + 1] = p1;
d_scaled_rectangles_array[idx + k * 4 + 2] = p2;
d_scaled_rectangles_array[idx + k * 4 + 3] = p3;
}
else
{
bool z = ((tr_x == 0) && (tr_y == 0) && (tr_width == 0) && (tr_height == 0));
d_scaled_rectangles_array[idx + k * 4] = z ? NULL : p0;
d_scaled_rectangles_array[idx + k * 4 + 1] = z ? NULL : p1;
d_scaled_rectangles_array[idx + k * 4 + 2] = z ? NULL : p2;
d_scaled_rectangles_array[idx + k * 4 + 3] = z ? NULL : p3;
} /* end of branch if(k<2) */
} /* end of k loop */
}
#endif
void setImageForCascadeClassifier( myCascade* _cascade, MyIntImage* _sum, MyIntImage* _sqsum,
#ifdef GPU
int* d_rectangles_array,
#endif
int total_nodes)
{
MyIntImage *sum = _sum;
MyIntImage *sqsum = _sqsum;
myCascade* cascade = _cascade;
MyRect equRect;
cascade->sum = *sum;
cascade->sqsum = *sqsum;
equRect.x = equRect.y = 0;
equRect.width = cascade->orig_window_size.width;
equRect.height = cascade->orig_window_size.height;
cascade->inv_window_area = equRect.width*equRect.height;
cascade->p0 = (sum->data) ;
cascade->p1 = (sum->data + equRect.width - 1) ;
cascade->p2 = (sum->data + sum->width*(equRect.height - 1));
cascade->p3 = (sum->data + sum->width*(equRect.height - 1) + equRect.width - 1);
cascade->pq0 = (sqsum->data);
cascade->pq1 = (sqsum->data + equRect.width - 1) ;
cascade->pq2 = (sqsum->data + sqsum->width*(equRect.height - 1));
cascade->pq3 = (sqsum->data + sqsum->width*(equRect.height - 1) + equRect.width - 1);
#ifdef GPU
/****************************************
* Load the index of the four corners
* of the filter rectangle
**************************************/
int **d_scaled_rectangles_array;
cudaMalloc((void**)&d_scaled_rectangles_array, sizeof(int*)*total_nodes*12);
dim3 grid ((total_nodes+255)/256);
dim3 block (256);
filter_kernel<<<grid, block>>>(d_rectangles_array,
d_scaled_rectangles_array, sum->data, sum->width, total_nodes);
cudaMemcpy(scaled_rectangles_array, d_scaled_rectangles_array,
sizeof(int*)*total_nodes*12, cudaMemcpyDeviceToHost);
cudaFree(d_scaled_rectangles_array);
#else
/****************************************
* Load the index of the four corners
* of the filter rectangle
**************************************/
int r_index = 0;
// collapsed the nested loop on the cpu too
for (int i = 0; i < total_nodes; i++)
{
/* loop over the number of rectangles */
for(int k = 0; k < 3; k++)
{
MyRect tr;
tr.x = rectangles_array[r_index + k*4];
tr.width = rectangles_array[r_index + 2 + k*4];
tr.y = rectangles_array[r_index + 1 + k*4];
tr.height = rectangles_array[r_index + 3 + k*4];
if (k < 2)
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
else
{
if ((tr.x == 0)&& (tr.y == 0) &&(tr.width == 0) &&(tr.height == 0))
{
scaled_rectangles_array[r_index + k*4] = NULL ;
scaled_rectangles_array[r_index + k*4 + 1] = NULL ;
scaled_rectangles_array[r_index + k*4 + 2] = NULL;
scaled_rectangles_array[r_index + k*4 + 3] = NULL;
}
else
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
} /* end of branch if(k<2) */
} /* end of k loop*/
r_index+=12;
} /* end i loop */
#endif
}
/****************************************************
* evalWeakClassifier:
* the actual computation of a haar filter.
* More info:
* http://en.wikipedia.org/wiki/Haar-like_features
***************************************************/
inline int evalWeakClassifier(int variance_norm_factor, int p_offset, int tree_index, int w_index, int r_index )
{
/* the node threshold is multiplied by the standard deviation of the image */
int t = tree_thresh_array[tree_index] * variance_norm_factor;
int sum = (*(scaled_rectangles_array[r_index] + p_offset)
- *(scaled_rectangles_array[r_index + 1] + p_offset)
- *(scaled_rectangles_array[r_index + 2] + p_offset)
+ *(scaled_rectangles_array[r_index + 3] + p_offset))
* weights_array[w_index];
sum += (*(scaled_rectangles_array[r_index+4] + p_offset)
- *(scaled_rectangles_array[r_index + 5] + p_offset)
- *(scaled_rectangles_array[r_index + 6] + p_offset)
+ *(scaled_rectangles_array[r_index + 7] + p_offset))
* weights_array[w_index + 1];
if ((scaled_rectangles_array[r_index+8] != NULL))
sum += (*(scaled_rectangles_array[r_index+8] + p_offset)
- *(scaled_rectangles_array[r_index + 9] + p_offset)
- *(scaled_rectangles_array[r_index + 10] + p_offset)
+ *(scaled_rectangles_array[r_index + 11] + p_offset))
* weights_array[w_index + 2];
if(sum >= t)
return alpha2_array[tree_index];
else
return alpha1_array[tree_index];
}
int runCascadeClassifier( myCascade* _cascade, MyPoint pt, int start_stage )
{
int p_offset, pq_offset;
int i, j;
unsigned int mean;
unsigned int variance_norm_factor;
int haar_counter = 0;
int w_index = 0;
int r_index = 0;
int stage_sum;
myCascade* cascade;
cascade = _cascade;
p_offset = pt.y * (cascade->sum.width) + pt.x;
pq_offset = pt.y * (cascade->sqsum.width) + pt.x;
/**************************************************************************
* Image normalization
* mean is the mean of the pixels in the detection window
* cascade->pqi[pq_offset] are the squared pixel values (using the squared integral image)
* inv_window_area is (despite its name) the total number of pixels in the detection window, as set in setImageForCascadeClassifier
*************************************************************************/
variance_norm_factor = (cascade->pq0[pq_offset] - cascade->pq1[pq_offset] - cascade->pq2[pq_offset] + cascade->pq3[pq_offset]);
mean = (cascade->p0[p_offset] - cascade->p1[p_offset] - cascade->p2[p_offset] + cascade->p3[p_offset]);
variance_norm_factor = (variance_norm_factor*cascade->inv_window_area);
variance_norm_factor = variance_norm_factor - mean*mean;
/***********************************************
* Note:
* The int_sqrt is a software integer square root.
* The GPU has hardware for floating-point square root (sqrtf).
* In GPU, it is wise to convert the variance norm
* into floating point, and use HW sqrtf function.
* More info:
* http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
**********************************************/
if( variance_norm_factor > 0 )
variance_norm_factor = int_sqrt(variance_norm_factor);
else
variance_norm_factor = 1;
/**************************************************
* The major computation happens here.
* For each scale in the image pyramid,
* and for each shifted step of the filter,
* send the shifted window through cascade filter.
*
* Note:
*
* Stages in the cascade filter are independent.
* However, a face can be rejected by any stage.
* Running stages in parallel delays the rejection,
* which induces unnecessary computation.
*
* Filters in the same stage are also independent,
* except that filter results need to be merged,
* and compared with a per-stage threshold.
*************************************************/
for( i = start_stage; i < cascade->n_stages; i++ )
{
/****************************************************
* A shared variable that induces false dependency
*
* To avoid it from limiting parallelism,
* we can duplicate it multiple times,
* e.g., using stage_sum_array[number_of_threads].
* Then threads only need to sync at the end
***************************************************/
stage_sum = 0;
for( j = 0; j < stages_array[i]; j++ )
{
/**************************************************
* Send the shifted window to a haar filter.
**************************************************/
stage_sum += evalWeakClassifier(variance_norm_factor, p_offset, haar_counter, w_index, r_index);
n_features++;
haar_counter++;
w_index+=3;
r_index+=12;
} /* end of j loop */
/**************************************************************
* threshold of the stage.
* If the sum is below the threshold,
* no faces are detected,
* and the search is abandoned at the i-th stage (-i).
* Otherwise, a face is detected (1)
**************************************************************/
/* the number "0.4" is empirically chosen for 5kk73 */
if( stage_sum < 0.4*stages_thresh_array[i] ){
return -i;
} /* end of the per-stage thresholding */
} /* end of i loop */
return 1;
}
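/*************************************************************************
 * Worked form of the normalization above (assuming inv_window_area holds
 * the pixel count N of the detection window, as set in
 * setImageForCascadeClassifier):
 *   sum   = p0 - p1 - p2 + p3      (sum of pixel values x over the window)
 *   sqsum = pq0 - pq1 - pq2 + pq3  (sum of x^2 over the window)
 *   variance_norm_factor = N*sqsum - sum^2 = N^2 * variance
 * so int_sqrt(variance_norm_factor) is N times the standard deviation of
 * the window, which is the factor used to scale the node thresholds in
 * evalWeakClassifier.
 ************************************************************************/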
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec)
{
myCascade* cascade = _cascade;
float factor = _factor;
MyPoint p;
int result;
int y1, y2, x2, x, y, step;
std::vector<MyRect> *vec = &_vec;
MySize winSize0 = cascade->orig_window_size;
MySize winSize;
winSize.width = myRound(winSize0.width*factor);
winSize.height = myRound(winSize0.height*factor);
y1 = 0;
/********************************************
* When the filter window shifts to the image border,
* some margin needs to be kept
*********************************************/
y2 = sum_row - winSize0.height;
x2 = sum_col - winSize0.width;
/********************************************
* Step size of filter window shifting
* Reducing step makes program faster,
* but decreases quality of detection.
* example:
* step = factor > 2 ? 1 : 2;
*
* For 5kk73,
* the factor and step can be kept constant,
* unless you want to change input image.
*
* The step size is set to 1 for 5kk73,
* i.e., shift the filter window by 1 pixel.
*******************************************/
step = 1;
/**********************************************
* Shift the filter window over the image.
* Each shift step is independent.
* Shared data structure may limit parallelism.
*
* Some random hints (may or may not work):
* Split or duplicate data structure.
* Merge functions/loops to increase locality
* Tiling to increase computation-to-memory ratio
*********************************************/
for( x = 0; x <= x2; x += step )
for( y = y1; y <= y2; y += step )
{
p.x = x;
p.y = y;
/*********************************************
* Optimization Opportunity:
* The same cascade filter is used each time
********************************************/
result = runCascadeClassifier( cascade, p, 0 );
/*******************************************************
* If a face is detected,
* record the coordinates of the filter window
* the "push_back" function is from std:vec, more info:
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Note that, if the filter runs on GPUs,
* the push_back operation is not possible on GPUs.
* The GPU may need to use a simpler data structure,
* e.g., an array, to store the coordinates of face,
* which can be later memcpy from GPU to CPU to do push_back
*******************************************************/
if( result > 0 )
{
MyRect r = {myRound(x*factor), myRound(y*factor), winSize.width, winSize.height};
vec->push_back(r);
}
}
}
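#ifdef GPU
/* Hedged sketch of the idea noted above: std::vector::push_back cannot be used in
 * device code, so a GPU port of the window sweep could write accepted windows into
 * a preallocated array through an atomic counter and copy the results back to the
 * host for the final grouping. The kernel below only demonstrates that collection
 * pattern; the call into the cascade itself is left as a placeholder (a device
 * port of runCascadeClassifier would be needed). All names here are hypothetical
 * and not used elsewhere in this file. */
__global__ void collect_candidates_sketch (MyRect* d_faces, int* d_face_count,
                                           int max_faces, int x2, int y2,
                                           float factor, MySize winSize)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x > x2 || y > y2) return;
  int result = 1; /* placeholder for a device-side cascade evaluation at (x, y) */
  if (result > 0)
  {
    int slot = atomicAdd(d_face_count, 1); /* claim one output slot */
    if (slot < max_faces)
    {
      MyRect r = { (int)(x * factor + 0.5f), (int)(y * factor + 0.5f),
                   winSize.width, winSize.height };
      d_faces[slot] = r;
    }
  }
}
#endif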
/*****************************************************
* Compute the integral image (and squared integral)
* Integral image helps quickly sum up an area.
* More info:
* http://en.wikipedia.org/wiki/Summed_area_table
****************************************************/
void integralImages( MyImage *src, MyIntImage *sum, MyIntImage *sqsum )
{
int x, y, s, sq, t, tq;
unsigned char it;
int height = src->height;
int width = src->width;
unsigned char *data = src->data;
int * sumData = sum->data;
int * sqsumData = sqsum->data;
for( y = 0; y < height; y++)
{
s = 0;
sq = 0;
/* loop over the number of columns */
for( x = 0; x < width; x ++)
{
it = data[y*width+x];
/* sum of the current row (integer)*/
s += it;
sq += it*it;
t = s;
tq = sq;
if (y != 0)
{
t += sumData[(y-1)*width+x];
tq += sqsumData[(y-1)*width+x];
}
sumData[y*width+x]=t;
sqsumData[y*width+x]=tq;
}
}
}
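/*****************************************************
 * Quick reference (a sketch, not used elsewhere):
 * once sumData holds the summed area table, the sum of
 * the pixels inside any w x h rectangle whose top-left
 * corner is (x, y) can be read with four lookups, which
 * is exactly the four-corner pattern used in
 * evalWeakClassifier.
 ****************************************************/
static inline int rectSumFromIntegral(const int* sumData, int width,
                                      int x, int y, int w, int h)
{
  /* corners: A=(x-1,y-1), B=(x+w-1,y-1), C=(x-1,y+h-1), D=(x+w-1,y+h-1) */
  int A = (x > 0 && y > 0) ? sumData[(y - 1) * width + (x - 1)] : 0;
  int B = (y > 0) ? sumData[(y - 1) * width + (x + w - 1)] : 0;
  int C = (x > 0) ? sumData[(y + h - 1) * width + (x - 1)] : 0;
  int D = sumData[(y + h - 1) * width + (x + w - 1)];
  return D - B - C + A;
}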
/***********************************************************
* This function downsamples an image using nearest-neighbor interpolation
* It is used to build the image pyramid
**********************************************************/
void nearestNeighbor (MyImage *src, MyImage *dst)
{
int y;
int j;
int x;
int i;
unsigned char* t;
unsigned char* p;
int w1 = src->width;
int h1 = src->height;
int w2 = dst->width;
int h2 = dst->height;
int rat = 0;
unsigned char* src_data = src->data;
unsigned char* dst_data = dst->data;
int x_ratio = (int)((w1<<16)/w2) +1;
int y_ratio = (int)((h1<<16)/h2) +1;
for (i=0;i<h2;i++)
{
t = dst_data + i*w2;
y = ((i*y_ratio)>>16);
p = src_data + y*w1;
rat = 0;
for (j=0;j<w2;j++)
{
x = (rat>>16);
*t++ = p[x];
//printf("x_ratio=%d y_ratio=%d downsampling at src image: row=%d col=%d\n", x_ratio, y_ratio, y, x);
rat += x_ratio;
}
}
}
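/* Note on the scaling above: x_ratio and y_ratio are 16.16 fixed-point step sizes,
 * so x = (j * x_ratio) >> 16 maps destination column j back to a source column
 * without floating-point work in the inner loop. For example, with w1 = 640 and
 * w2 = 320, x_ratio = (640<<16)/320 + 1 = 131073, and destination column j = 100
 * reads source column (100 * 131073) >> 16 = 200. */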
int readTextClassifier()
{
/*number of stages of the cascade classifier*/
int stages = 0;
/*total number of weak classifiers (one node each)*/
int total_nodes = 0;
int i, j, k, l;
char mystring [12];
int r_index = 0;
int w_index = 0;
int tree_index = 0;
FILE *finfo = fopen("info.txt", "r");
/**************************************************
* how many stages are in the cascaded filter?
* the first line of info.txt is the number of stages
**************************************************/
if ( fgets (mystring , 12 , finfo) != NULL )
stages = atoi(mystring);
if (stages == 0) {
printf("The number of stages in the cascaded filter must be postive\n");
return -1;
}
stages_array = (int *)malloc(sizeof(int)*stages);
/**************************************************
* how many filters in each stage?
* They are specified in info.txt,
* starting from second line.
* (from line 2 to line 26)
*************************************************/
i = 0;
while ( fgets (mystring , 12 , finfo) != NULL )
{
stages_array[i] = atoi(mystring);
total_nodes += stages_array[i];
i++;
}
fclose(finfo);
/* TODO: use matrices where appropriate */
/***********************************************
* Allocate a lot of array structures
* Note that, to increase parallelism,
* some arrays need to be split or duplicated
**********************************************/
rectangles_array = (int*)malloc(sizeof(int)*total_nodes*12);
scaled_rectangles_array = (int**)malloc(sizeof(int*)*total_nodes*12);
weights_array = (int*)malloc(sizeof(int)*total_nodes*3);
alpha1_array = (int*)malloc(sizeof(int)*total_nodes);
alpha2_array = (int*)malloc(sizeof(int)*total_nodes);
tree_thresh_array = (int*)malloc(sizeof(int)*total_nodes);
stages_thresh_array = (int*)malloc(sizeof(int)*stages);
FILE *fp = fopen("class.txt", "r");
/******************************************
* Read the filter parameters in class.txt
*
* Each stage of the cascaded filter has:
* 18 parameters per filter x number of filters per stage
* + 1 threshold per stage
*
* For example,
* the first stage has 9 filters,
* the first stage is specified using
* 18 * 9 + 1 = 163 parameters
* They are line 1 to 163 of class.txt
*
* The 18 parameters for each filter are:
* 1 to 4: coordinates of rectangle 1
* 5: weight of rectangle 1
* 6 to 9: coordinates of rectangle 2
* 10: weight of rectangle 2
* 11 to 14: coordinates of rectangle 3
* 15: weight of rectangle 3
* 16: threshold of the filter
* 17: alpha 1 of the filter
* 18: alpha 2 of the filter
******************************************/
/* loop over n of stages */
for (i = 0; i < stages; i++)
{ /* loop over n of trees */
for (j = 0; j < stages_array[i]; j++)
{ /* loop over n of rectangular features */
for(k = 0; k < 3; k++)
{ /* loop over the n of vertices */
for (l = 0; l <4; l++)
{
if (fgets (mystring , 12 , fp) != NULL)
rectangles_array[r_index] = atoi(mystring);
else
break;
r_index++;
} /* end of l loop */
if (fgets (mystring , 12 , fp) != NULL)
{
weights_array[w_index] = atoi(mystring);
/* Shift value to avoid overflow in the haar evaluation */
/*TODO: make more general */
/*weights_array[w_index]>>=8; */
}
else
break;
w_index++;
} /* end of k loop */
if (fgets (mystring , 12 , fp) != NULL)
tree_thresh_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha1_array[tree_index]= atoi(mystring);
else
break;
if (fgets (mystring , 12 , fp) != NULL)
alpha2_array[tree_index]= atoi(mystring);
else
break;
tree_index++;
if (j == stages_array[i]-1)
{
if (fgets (mystring , 12 , fp) != NULL)
stages_thresh_array[i] = atoi(mystring);
else
break;
}
} /* end of j loop */
} /* end of i loop */
fclose(fp);
return total_nodes;
}
void releaseTextClassifier()
{
free(stages_array);
free(rectangles_array);
free(scaled_rectangles_array);
free(weights_array);
free(tree_thresh_array);
free(alpha1_array);
free(alpha2_array);
free(stages_thresh_array);
}
/* End of file. */
|
the_stack
|
#pragma once
#include <gunrock/app/problem_base.cuh>
#include <gunrock/oprtr/1D_oprtr/for_all.cuh>
#include <queue>
#define debug_aml(a...)
//#define debug_aml(a...) {printf("%s:%d ", __FILE__, __LINE__); printf(a);\
printf("\n");}
namespace gunrock {
namespace app {
namespace mf {
/**
* @brief Specifying parameters for MF Problem
* @param parameters The util::Parameter<...> structure holding all
* parameter info
* \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_problem(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(gunrock::app::UseParameters_problem(parameters));
return retval;
}
/**
* @brief Max Flow Problem structure stores device-side arrays
* @tparam _GraphT Type of the graph
* @tparam _ValueT Type of signed integer to use as capacity and flow
of edges and as excess and height values of vertices.
* @tparam _FLAG Problem flags
*/
template <typename _GraphT, typename _ValueT = typename _GraphT::ValueT,
ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG> {
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::GpT GpT;
typedef _ValueT ValueT;
typedef ProblemBase<GraphT, FLAG> BaseProblem;
typedef DataSliceBase<GraphT, FLAG> BaseDataSlice;
// Helper structures
/**
* @brief Data structure containing MF-specific data on an individual GPU.
*/
struct DataSlice : BaseDataSlice {
// MF-specific storage arrays:
util::Array1D<SizeT, ValueT> flow; // edge flow
util::Array1D<SizeT, ValueT> residuals; // edge residuals
util::Array1D<SizeT, ValueT> excess; // vertex excess
util::Array1D<SizeT, VertexT> height; // vertex height
util::Array1D<SizeT, VertexT> reverse; // id reverse edge
util::Array1D<SizeT, SizeT> lowest_neighbor; // id lowest neighbor
util::Array1D<SizeT, VertexT> local_vertices; // set of vertices
util::Array1D<SizeT, SizeT, util::PINNED> active; // flag active vertices
util::Array1D<SizeT, VertexT> head;
util::Array1D<SizeT, VertexT> tail;
VertexT head_;
VertexT tail_;
util::Array1D<SizeT, bool> reachabilities;
util::Array1D<SizeT, VertexT> queue0;
util::Array1D<SizeT, VertexT> queue1;
util::Array1D<SizeT, bool> mark;
VertexT source; // source vertex
VertexT sink; // sink vertex
int num_repeats;
SizeT num_updated_vertices;
bool was_changed; // flag relabeling
util::Array1D<SizeT, SizeT, util::PINNED> changed;
/*
* @brief Default constructor
*/
DataSlice() : BaseDataSlice() {
source = util::PreDefinedValues<VertexT>::InvalidValue;
sink = util::PreDefinedValues<VertexT>::InvalidValue;
num_repeats = 10000;
num_updated_vertices = 1;
was_changed = false;
reverse.SetName("reverse");
excess.SetName("excess");
flow.SetName("flow");
residuals.SetName("residuals");
height.SetName("height");
lowest_neighbor.SetName("lowest_neighbor");
local_vertices.SetName("local_vertices");
active.SetName("active");
head.SetName("head");
tail.SetName("tail");
reachabilities.SetName("reachabilities");
queue0.SetName("queue0");
queue1.SetName("queue1");
mark.SetName("mark");
changed.SetName("changed");
}
/*
* @brief Default destructor
*/
virtual ~DataSlice() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(excess.Release(target));
GUARD_CU(flow.Release(target));
GUARD_CU(residuals.Release(target));
GUARD_CU(height.Release(target));
GUARD_CU(reverse.Release(target));
GUARD_CU(lowest_neighbor.Release(target));
GUARD_CU(local_vertices.Release(target));
GUARD_CU(active.Release(target));
GUARD_CU(head.Release(target));
GUARD_CU(tail.Release(target));
GUARD_CU(reachabilities.Release(target));
GUARD_CU(queue0.Release(target));
GUARD_CU(queue1.Release(target));
GUARD_CU(mark.Release(target));
GUARD_CU(BaseDataSlice::Release(target));
return retval;
}
/**
* @brief initializing MF-specific Data Slice on each GPU
* @param sub_graph Sub graph on the GPU.
* @param[in] gpu_idx GPU device index
* @param[in] target Targeting device location
* @param[in] flag Problem flag containing options
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &sub_graph, int num_gpus = 1, int gpu_idx = 0,
util::Location target = util::DEVICE,
ProblemFlag flag = Problem_None) {
debug_aml("DataSlice Init");
cudaError_t retval = cudaSuccess;
SizeT nodes_size = sub_graph.nodes;
SizeT edges_size = sub_graph.edges;
was_changed = false;
GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
//
// Allocate data on Gpu
//
GUARD_CU(flow.Allocate(edges_size, util::HOST | target));
GUARD_CU(residuals.Allocate(edges_size, target));
GUARD_CU(reverse.Allocate(edges_size, target));
GUARD_CU(excess.Allocate(nodes_size, target));
GUARD_CU(height.Allocate(nodes_size, util::HOST | target));
GUARD_CU(lowest_neighbor.Allocate(nodes_size, target));
GUARD_CU(local_vertices.Allocate(nodes_size, target));
GUARD_CU(active.Allocate(2, util::HOST | target));
GUARD_CU(head.Allocate(1, target));
GUARD_CU(tail.Allocate(1, target));
GUARD_CU(reachabilities.Allocate(nodes_size, target));
GUARD_CU(queue0.Allocate(nodes_size, target));
GUARD_CU(queue1.Allocate(nodes_size, target));
GUARD_CU(mark.Allocate(nodes_size, target));
GUARD_CU(changed.Allocate(1, util::HOST | target));
GUARD_CU(util::SetDevice(gpu_idx));
GUARD_CU(sub_graph.Move(util::HOST, target, this->stream));
return retval;
} // Init Data Slice
/**
* @brief Reset DataSlice function. Must be called prior to each run.
* @param[in] target Targeting device location
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(const GraphT &graph, const VertexT source,
const VertexT sink, VertexT *h_reverse,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::CsrT CsrT;
debug_aml("DataSlice Reset");
SizeT nodes_size = graph.nodes;
SizeT edges_size = graph.edges;
// Ensure data are allocated
GUARD_CU(active.EnsureSize_(2, target | util::HOST));
GUARD_CU(flow.EnsureSize_(edges_size, target));
GUARD_CU(residuals.EnsureSize_(edges_size, target));
GUARD_CU(reverse.EnsureSize_(edges_size, target));
GUARD_CU(excess.EnsureSize_(nodes_size, target));
GUARD_CU(height.EnsureSize_(nodes_size, target | util::HOST));
GUARD_CU(lowest_neighbor.EnsureSize_(nodes_size, target));
GUARD_CU(local_vertices.EnsureSize_(nodes_size, target));
GUARD_CU(head.EnsureSize_(1, target));
GUARD_CU(tail.EnsureSize_(1, target));
GUARD_CU(reachabilities.EnsureSize_(nodes_size, target));
GUARD_CU(queue0.EnsureSize_(nodes_size, target));
GUARD_CU(queue1.EnsureSize_(nodes_size, target));
GUARD_CU(mark.EnsureSize_(nodes_size, target));
GUARD_CU(changed.EnsureSize_(1, target | util::HOST));
GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(reverse.SetPointer(h_reverse, edges_size, util::HOST));
GUARD_CU(reverse.Move(util::HOST, target, edges_size, 0, this->stream));
#if MF_DEBUG
debug_aml("reverse on CPU\n");
for (int i = 0; i < edges_size; ++i)
debug_aml("reverse[%d] = %d\n", i, h_reverse[i]);
debug_aml("reverse after coping to device\n");
GUARD_CU(reverse.ForAll(
[] __host__ __device__(VertexT * r, const VertexT &pos) {
debug_aml("reverse[%d] = %d\n", pos, r[pos]);
},
edges_size, target, this->stream));
#endif
this->num_updated_vertices = 1;
// Reset data
GUARD_CU(height.ForAll(
[source, sink, nodes_size] __host__ __device__(VertexT * h,
const VertexT &pos) {
if (pos == source)
h[pos] = nodes_size;
else // if (pos == sink)
h[pos] = 0;
// else
// h[pos] = 2 * nodes_size + 1;
},
nodes_size, target, this->stream));
GUARD_CU(flow.ForAll(
[] __host__ __device__(ValueT * f, const VertexT &pos) {
f[pos] = (ValueT)0;
},
edges_size, target, this->stream));
GUARD_CU(excess.ForAll(
[] __host__ __device__(ValueT * e, const VertexT &pos) {
e[pos] = (ValueT)0;
},
nodes_size, target, this->stream));
GUARD_CU(active.ForAll(
[] __host__ __device__(SizeT * active_, const VertexT &pos) {
active_[pos] = 1;
},
2, target | util::HOST, this->stream));
GUARD_CU(lowest_neighbor.ForAll(
[graph, source] __host__ __device__(VertexT * lowest_neighbor,
const VertexT pos) {
lowest_neighbor[pos] =
util::PreDefinedValues<VertexT>::InvalidValue;
},
nodes_size, target, this->stream));
GUARD_CU(local_vertices.ForAll(
[] __host__ __device__(VertexT * local_vertex, const VertexT pos) {
local_vertex[pos] = pos;
},
nodes_size, target));
GUARD_CU(mark.ForAll(
[] __host__ __device__(bool *mark_, const VertexT &pos) {
mark_[pos] = false;
},
nodes_size, target, this->stream));
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed.");
return retval;
}
}; // DataSlice
// Members
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
// Methods
/**
* @brief MFProblem default constructor
*/
Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None)
: BaseProblem(_parameters, _flag), data_slices(NULL) {}
/**
* @brief MFProblem default destructor
*/
virtual ~Problem() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (data_slices == NULL) return retval;
for (int i = 0; i < this->num_gpus; i++)
GUARD_CU(data_slices[i].Release(target));
if ((target & util::HOST) != 0 &&
data_slices[0].GetPointer(util::DEVICE) == NULL) {
delete[] data_slices;
data_slices = NULL;
}
GUARD_CU(BaseProblem::Release(target));
return retval;
}
/**
* \addtogroup PublicInterface
* @{
*/
/**
* @brief Copy result flow computed on GPUs back to host-side arrays.
* @param[out] h_flow Host array to store computed flow on edges
* \return cudaError_t Error message(s), if any
*/
cudaError_t Extract(ValueT *h_flow, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
auto &data_slice = data_slices[0][0];
SizeT eN = this->org_graph->edges;
// Set device
if (target == util::DEVICE) {
GUARD_CU(util::SetDevice(this->gpu_idx[0]));
GUARD_CU(data_slice.flow.SetPointer(h_flow, eN, util::HOST));
GUARD_CU(data_slice.flow.Move(util::DEVICE, util::HOST));
} else if (target == util::HOST) {
GUARD_CU(data_slice.flow.ForEach(
h_flow,
[] __host__ __device__(const ValueT &f, ValueT &h_f) {
{ h_f = f; }
},
eN, util::HOST));
}
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
return retval;
}
/**
* @brief Init MF Problem
* @param graph The graph that MF processes on
* @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) {
debug_aml("Problem Init");
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseProblem::Init(graph, target));
data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
auto gpu_name = std::to_string(gpu);
data_slices[gpu].SetName("data_slices[" + gpu_name + "]");
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
auto &data_slice = data_slices[gpu][0];
GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_gpus,
this->gpu_idx[gpu], target, this->flag));
GUARD_CU2(cudaStreamSynchronize(data_slices[gpu]->stream),
"sync failed.");
} // end for (gpu)
return retval;
} // End Init MF Problem
/**
* @brief Reset Problem function. Must be called prior to each run.
* @param[in] graph The graph that MF processes on
* @param[in] h_reverse Host array of reverse edge indices
* @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(GraphT &graph, VertexT *h_reverse,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
debug_aml("Problem Reset");
auto source_vertex = this->parameters.template Get<VertexT>("source");
auto sink_vertex = this->parameters.template Get<VertexT>("sink");
auto num_repeats = this->parameters.template Get<int>("num-repeats");
for (int gpu = 0; gpu < this->num_gpus; ++gpu) {
auto &data_slice = data_slices[gpu][0];
data_slice.source = source_vertex;
data_slice.sink = sink_vertex;
data_slice.num_repeats = num_repeats;
// Set device
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu]->Reset(graph, source_vertex, sink_vertex,
h_reverse, target));
GUARD_CU(data_slices[gpu].Move(util::HOST, target));
}
// Filling the initial input_queue for MF problem
int gpu;
VertexT src_;
if (this->num_gpus <= 1) {
gpu = 0;
src_ = source_vertex;
} else {
gpu = this->org_graph->partition_table[source_vertex];
if (this->flag & partitioner::Keep_Node_Num)
src_ = source_vertex;
else
src_ = this->org_graph->GpT::convertion_table[source_vertex];
}
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
return retval;
}
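  /* Typical call order implied by the comments above (an illustrative sketch;
   * the enactor that actually runs MF lives outside this header, and the names
   * graph, h_reverse and h_flow are caller-side):
   *
   *   Problem<GraphT> problem(parameters);
   *   problem.Init(graph);                  // allocate per-GPU data slices
   *   problem.Reset(graph, h_reverse);      // must precede every run
   *   // ... run the MF enactor ...
   *   std::vector<ValueT> h_flow(graph.edges);
   *   problem.Extract(h_flow.data());       // copy per-edge flow back to host
   */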
/** @} */
};
} // namespace mf
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
the_stack
|
#ifndef CH_SPH_GENERAL_CUH
#define CH_SPH_GENERAL_CUH
// ----------------------------------------------------------------------------
// CUDA headers
// ----------------------------------------------------------------------------
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include "chrono_fsi/ChApiFsi.h"
#include "chrono_fsi/utils/ChUtilsDevice.cuh"
#include "chrono_fsi/ChSystemFsi_impl.cuh"
#include "chrono_fsi/physics/ChParams.h"
#include "chrono_fsi/math/ChFsiLinearSolver.h"
#include "chrono_fsi/math/ExactLinearSolvers.cuh"
#include "chrono_fsi/math/custom_math.h"
namespace chrono {
namespace fsi {
/// Declared as static const variables so that they can be used in different translation units in the utils
__constant__ static SimParams paramsD;
__constant__ static NumberOfObjects numObjectsD;
/// Short define of the kernel function
#define W3h W3h_Spline
/// Short define of the kernel function gradient
#define GradWh GradWh_Spline
void CopyParams_NumberOfObjects(std::shared_ptr<SimParams> paramsH, std::shared_ptr<NumberOfObjects> numObjectsH);
// 3D kernel function
//--------------------------------------------------------------------------------------------------------------------------------
// Cubic Spline SPH kernel function
__device__ inline Real W3h_Spline(Real d, Real h) { // d is positive. h is the sph kernel length (i.e. h in
// the document) d is the distance of 2 particles
Real invh = paramsD.INVHSML;
Real q = fabs(d) * invh;
if (q < 1) {
return (0.25f * (INVPI * cube(invh)) * (cube(2 - q) - 4 * cube(1 - q)));
}
if (q < 2) {
return (0.25f * (INVPI * cube(invh)) * cube(2 - q));
}
return 0;
}
//--------------------------------------------------------------------------------------------------------------------------------
// Johnson kernel 1996b
__device__ inline Real W3h_High(Real d, Real h) { // d is positive. h is the sph kernel length (i.e. h in
// the document) d is the distance of 2 particles
Real invh = paramsD.INVHSML;
Real q = fabs(d) * invh;
if (q < 2) {
return (1.25f * (INVPI * cube(invh)) * (0.1875f * square(q) - 0.75f * q + 0.75f));
}
return 0;
}
//--------------------------------------------------------------------------------------------------------------------------------
// Quintic Spline SPH kernel function
__device__ inline Real W3h_Quintic(Real d, Real h) { // d is positive. h is the sph kernel length (i.e. h in
// the document) d is the distance of 2 particles
Real invh = paramsD.INVHSML;
Real q = fabs(d) * invh;
Real coeff = 8.35655e-3; // 3/359
if (q < 1) {
return (coeff * INVPI * cube(invh) * (quintic(3 - q) - 6 * quintic(2 - q) + 15 * quintic(1 - q)));
}
if (q < 2) {
return (coeff * INVPI * cube(invh) * (quintic(3 - q) - 6 * quintic(2 - q)));
}
if (q < 3) {
return (coeff * INVPI * cube(invh) * (quintic(3 - q)));
}
return 0;
}
// Gradient of the kernel function
//--------------------------------------------------------------------------------------------------------------------------------
// Gradient of Cubic Spline SPH kernel function
__device__ inline Real3 GradWh_Spline(Real3 d, Real h) { // d is positive. h is the sph kernel length (i.e. h
// in the document) d is the distance of 2 particles
Real invh = paramsD.INVHSML;
Real q = length(d) * invh;
if (abs(q) < EPSILON)
return mR3(0.0);
bool less1 = (q < 1);
bool less2 = (q < 2);
return (less1 * (3 * q - 4.0f) + less2 * (!less1) * (-q + 4.0f - 4.0f / q)) * .75f * INVPI * quintic(invh) * d;
}
//--------------------------------------------------------------------------------------------------------------------------------
// Gradient of Johnson kernel 1996b
__device__ inline Real3 GradWh_High(Real3 d, Real h) { // d is positive. h is the sph kernel length (i.e. h
// in the document) d is the distance of 2 particles
Real invh = paramsD.INVHSML;
Real q = length(d) * invh;
if (abs(q) < EPSILON)
return mR3(0.0);
bool less2 = (q < 2);
return (3.0 / 8.0 * q - 3.0 / 4.0) * 5.0 / 4.0 / q * INVPI * (1.0 / quintic(h)) * d * less2;
}
//--------------------------------------------------------------------------------------------------------------------------------
// Gradient of Quintic Spline SPH kernel function
__device__ inline Real3 GradWh_Quintic(Real3 d, Real h) { // d is positive. h is the sph kernel length (i.e. h in
// the document) d is the distance of 2 particles
Real invh = paramsD.INVHSML;
Real q = length(d) * invh;
if (fabs(q) < 1e-10)
return mR3(0.0);
Real coeff = -4.178273e-2; // -15/359
if (q < 1) {
return (coeff * (INVPI * quintic(invh) / q) * d * (quartic(3 - q) - 6 * quartic(2 - q) + 15 * quartic(1 - q)));
}
if (q < 2) {
return (coeff * (INVPI * quintic(invh) / q) * d * (quartic(3 - q) - 6 * quartic(2 - q)));
}
if (q < 3) {
return (coeff * (INVPI * quintic(invh) / q) * d * (quartic(3 - q)));
}
return mR3(0);
}
//--------------------------------------------------------------------------------------------------------------------------------
// fluid equation of state
__device__ inline Real Eos(Real rho, Real type) {
// if (rho < paramsD.rho0) //
// rho = paramsD.rho0; //
//******************************
// Real gama = 7;
// Real B = 100 * paramsD.rho0 * paramsD.v_Max * paramsD.v_Max / gama;
// return B * (pow(rho / paramsD.rho0, gama) - 1) + paramsD.BASEPRES; //
return paramsD.Cs * paramsD.Cs * (rho - paramsD.rho0); //
}
//--------------------------------------------------------------------------------------------------------------------------------
// Inverse of equation of state
__device__ inline Real InvEos(Real pw) {
Real rho = pw / (paramsD.Cs * paramsD.Cs) + paramsD.rho0; //
return rho;
}
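// Taken together, Eos and InvEos implement the linear (weakly compressible)
// equation of state p = Cs^2 * (rho - rho0), so InvEos(Eos(rho)) == rho; the
// commented-out Tait form above, B * ((rho/rho0)^gamma - 1) + BASEPRES, is the
// stiffer alternative.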
//--------------------------------------------------------------------------------------------------------------------------------
// ferrariCi
__device__ inline Real FerrariCi(Real rho) {
int gama = 7;
Real B = 100 * paramsD.rho0 * paramsD.v_Max * paramsD.v_Max / gama;
return sqrt(gama * B / paramsD.rho0) * pow(rho / paramsD.rho0, 0.5 * (gama - 1));
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 Modify_Local_PosB(Real3& b, Real3 a) {
Real3 dist3 = a - b;
b.x += ((dist3.x > 0.5f * paramsD.boxDims.x) ? paramsD.boxDims.x : 0);
b.x -= ((dist3.x < -0.5f * paramsD.boxDims.x) ? paramsD.boxDims.x : 0);
b.y += ((dist3.y > 0.5f * paramsD.boxDims.y) ? paramsD.boxDims.y : 0);
b.y -= ((dist3.y < -0.5f * paramsD.boxDims.y) ? paramsD.boxDims.y : 0);
b.z += ((dist3.z > 0.5f * paramsD.boxDims.z) ? paramsD.boxDims.z : 0);
b.z -= ((dist3.z < -0.5f * paramsD.boxDims.z) ? paramsD.boxDims.z : 0);
dist3 = a - b;
// avoid a zero distance when two markers perfectly overlap
Real dd = dist3.x*dist3.x + dist3.y*dist3.y + dist3.z*dist3.z;
Real MinD = paramsD.epsMinMarkersDis * paramsD.HSML;
Real sq_MinD = MinD * MinD;
if (dd < sq_MinD) {
dist3 = mR3(MinD, 0, 0);
}
b = a - dist3;
return (dist3);
}
__device__ inline Real3 Distance(Real3 a, Real3 b) {
return Modify_Local_PosB(b, a);
}
//--------------------------------------------------------------------------------------------------------------------------------
// first component of q is the scalar (rotation) part; the last 3 components are the axis (vector) part
__device__ inline void RotationMatirixFromQuaternion(Real3& AD1, Real3& AD2, Real3& AD3, const Real4& q) {
AD1 = 2 * mR3(0.5f - q.z * q.z - q.w * q.w, q.y * q.z - q.x * q.w, q.y * q.w + q.x * q.z);
AD2 = 2 * mR3(q.y * q.z + q.x * q.w, 0.5f - q.y * q.y - q.w * q.w, q.z * q.w - q.x * q.y);
AD3 = 2 * mR3(q.y * q.w - q.x * q.z, q.z * q.w + q.x * q.y, 0.5f - q.y * q.y - q.z * q.z);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 InverseRotate_By_RotationMatrix_DeviceHost(const Real3& A1,
const Real3& A2,
const Real3& A3,
const Real3& r3) {
return mR3(A1.x * r3.x + A2.x * r3.y + A3.x * r3.z, A1.y * r3.x + A2.y * r3.y + A3.y * r3.z,
A1.z * r3.x + A2.z * r3.y + A3.z * r3.z);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline int3 calcGridPos(Real3 p) {
int3 gridPos;
if (paramsD.cellSize.x * paramsD.cellSize.y * paramsD.cellSize.z == 0)
printf("calcGridPos=%f,%f,%f\n", paramsD.cellSize.x, paramsD.cellSize.y, paramsD.cellSize.z);
gridPos.x = (int)floor((p.x - paramsD.worldOrigin.x) / paramsD.cellSize.x);
gridPos.y = (int)floor((p.y - paramsD.worldOrigin.y) / paramsD.cellSize.y);
gridPos.z = (int)floor((p.z - paramsD.worldOrigin.z) / paramsD.cellSize.z);
return gridPos;
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline uint calcGridHash(int3 gridPos) {
gridPos.x -= ((gridPos.x >= paramsD.gridSize.x) ? paramsD.gridSize.x : 0);
gridPos.y -= ((gridPos.y >= paramsD.gridSize.y) ? paramsD.gridSize.y : 0);
gridPos.z -= ((gridPos.z >= paramsD.gridSize.z) ? paramsD.gridSize.z : 0);
gridPos.x += ((gridPos.x < 0) ? paramsD.gridSize.x : 0);
gridPos.y += ((gridPos.y < 0) ? paramsD.gridSize.y : 0);
gridPos.z += ((gridPos.z < 0) ? paramsD.gridSize.z : 0);
return gridPos.z * paramsD.gridSize.y * paramsD.gridSize.x + gridPos.y * paramsD.gridSize.x + gridPos.x;
}
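/* Hedged sketch (not used in this header): a typical SPH neighbor search visits
 * the 3 x 3 x 3 block of cells around a particle by combining calcGridPos and
 * calcGridHash, e.g.
 *
 *   int3 gridPos = calcGridPos(posRadA);
 *   for (int z = -1; z <= 1; z++)
 *     for (int y = -1; y <= 1; y++)
 *       for (int x = -1; x <= 1; x++) {
 *         uint hash = calcGridHash(mI3(gridPos.x + x, gridPos.y + y, gridPos.z + z));
 *         // iterate over the particles stored in cell `hash` via the sorted index arrays
 *       }
 *
 * Here mI3 is assumed to be the int3 helper from custom_math.h and posRadA a
 * particle position. */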
////--------------------------------------------------------------------------------------------------------------------------------
inline __device__ Real Strain_Rate(Real3 grad_ux, Real3 grad_uy, Real3 grad_uz) {
grad_ux.y = (grad_uy.x + grad_ux.y) * 0.5;
grad_ux.z = (grad_uz.x + grad_ux.z) * 0.5;
grad_uy.x = grad_ux.y;
grad_uy.z = (grad_uy.z + grad_uz.y) * 0.5;
grad_uz.x = grad_ux.z;
grad_uz.y = grad_uy.z;
return sqrt( //
0.5 * (length(grad_ux) * length(grad_ux) + //
length(grad_uy) * length(grad_uy) + //
length(grad_uz) * length(grad_uz)) //
);
}
////--------------------------------------------------------------------------------------------------------------------------------
inline __device__ Real Tensor_Norm(Real* T) {
return sqrt( //
0.5 * (T[0] * T[0] + T[1] * T[1] + T[2] * T[2] + //
T[3] * T[3] + T[4] * T[4] + T[5] * T[5] + //
T[6] * T[6] + T[7] * T[7] + T[8] * T[8]) //
);
}
////--------------------------------------------------------------------------------------------------------------------------------
inline __device__ Real Sym_Tensor_Norm(Real3 xx_yy_zz, Real3 xy_xz_yz) {
return sqrt(0.5 * (xx_yy_zz.x * xx_yy_zz.x + xx_yy_zz.y * xx_yy_zz.y + xx_yy_zz.z * xx_yy_zz.z +
2 * xy_xz_yz.x * xy_xz_yz.x + 2 * xy_xz_yz.y * xy_xz_yz.y + 2 * xy_xz_yz.z * xy_xz_yz.z));
}
////--------------------------------------------------------------------------------------------------------------------------------
inline __device__ Real Inertia_num(Real Strain_rate, Real rho, Real p, Real diam) {
Real I = Strain_rate * diam * sqrt(rho / rmaxr(p, EPSILON));
return rminr(1e3, I);
}
////--------------------------------------------------------------------------------------------------------------------------------
inline __device__ Real mu_I(Real Strain_rate, Real I) {
Real mu = 0;
if (paramsD.mu_of_I == friction_law::constant)
mu = paramsD.mu_fric_s;
else if (paramsD.mu_of_I == friction_law::linear)
mu = paramsD.mu_fric_s + paramsD.mu_I_b * I;
else
mu = paramsD.mu_fric_s + (paramsD.mu_fric_2 - paramsD.mu_fric_s) * (I / (paramsD.mu_I0 + I));
return mu;
}
////--------------------------------------------------------------------------------------------------------------------------------
inline __device__ Real mu_eff(Real Strain_rate, Real p, Real mu_I) {
return rmaxr(mu_I * rmaxr(p, 0.0) / Strain_rate, paramsD.mu_max);
}
////--------------------------------------------------------------------------------------------------------------------------------
inline __device__ Real Herschel_Bulkley_stress(Real Strain_rate, Real k, Real n, Real tau0) {
Real tau = tau0 + k * pow(Strain_rate, n);
return tau;
}
////--------------------------------------------------------------------------------------------------------------------------------
inline __device__ Real Herschel_Bulkley_mu_eff(Real Strain_rate, Real k, Real n, Real tau0) {
Real mu_eff = tau0 / Strain_rate + k * pow(Strain_rate, n - 1);
return rminr(mu_eff, paramsD.mu_max);
}
////--------------------------------------------------------------------------------------------------------------------------------
inline __device__ void BCE_Vel_Acc(int i_idx,
Real3& myAcc,
Real3& V_prescribed,
Real4* sortedPosRad,
int4 updatePortion,
uint* gridMarkerIndexD,
Real4* qD,
Real3* rigidSPH_MeshPos_LRF_D,
Real3* posRigid_fsiBodies_D,
Real4* velMassRigid_fsiBodies_D,
Real3* omegaVelLRF_fsiBodies_D,
Real3* accRigid_fsiBodies_D,
Real3* omegaAccLRF_fsiBodies_D,
uint* rigidIdentifierD,
Real3* pos_fsi_fea_D,
Real3* vel_fsi_fea_D,
Real3* acc_fsi_fea_D,
uint* FlexIdentifierD,
const int numFlex1D,
uint2* CableElementsNodes,
uint4* ShellelementsNodes) {
int Original_idx = gridMarkerIndexD[i_idx];
// See if this belongs to a fixed boundary
if (Original_idx >= updatePortion.x && Original_idx < updatePortion.y) {
myAcc = mR3(0.0);
V_prescribed = mR3(0.0);
if (paramsD.Apply_BC_U)
V_prescribed = user_BC_U(mR3(sortedPosRad[i_idx]));
} else if (Original_idx >= updatePortion.y && Original_idx < updatePortion.z) {
int rigidIndex = rigidIdentifierD[Original_idx - updatePortion.y];
Real4 q4 = qD[rigidIndex];
Real3 a1, a2, a3;
RotationMatirixFromQuaternion(a1, a2, a3, q4);
Real3 rigidSPH_MeshPos_LRF__ = rigidSPH_MeshPos_LRF_D[Original_idx - updatePortion.y];
// Real3 p_com = mR3(posRigid_fsiBodies_D[rigidIndex]);
Real3 v_com = mR3(velMassRigid_fsiBodies_D[rigidIndex]);
Real3 a_com = accRigid_fsiBodies_D[rigidIndex];
Real3 angular_v_com = omegaVelLRF_fsiBodies_D[rigidIndex];
Real3 angular_a_com = omegaAccLRF_fsiBodies_D[rigidIndex];
// Real3 p_rel = mR3(sortedPosRad[i_idx]) - p_com;
Real3 omegaCrossS = cross(angular_v_com, rigidSPH_MeshPos_LRF__);
V_prescribed = v_com + mR3(dot(a1, omegaCrossS), dot(a2, omegaCrossS), dot(a3, omegaCrossS));
// V_prescribed = v_com + cross(angular_v_com, rigidSPH_MeshPos_LRF);
Real3 alphaCrossS = cross(angular_a_com, rigidSPH_MeshPos_LRF__);
Real3 alphaCrossScrossS = cross(angular_v_com, cross(angular_v_com, rigidSPH_MeshPos_LRF__));
// myAcc = a_com + cross(angular_a_com, p_rel) + cross(angular_v_com, cross(angular_v_com,
// rigidSPH_MeshPos_LRF__));
myAcc = a_com + mR3(dot(a1, alphaCrossS), dot(a2, alphaCrossS), dot(a3, alphaCrossS)) +
mR3(dot(a1, alphaCrossScrossS), dot(a2, alphaCrossScrossS), dot(a3, alphaCrossScrossS));
        // Otherwise: flexible (FEA) bodies
} else if (Original_idx >= updatePortion.z && Original_idx < updatePortion.w) {
int FlexIndex = FlexIdentifierD[Original_idx - updatePortion.z];
if (FlexIndex < numFlex1D) {
int nA = CableElementsNodes[FlexIndex].x;
int nB = CableElementsNodes[FlexIndex].y;
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[nA];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[nB];
Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[nA];
Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[nB];
Real3 acc_fsi_fea_D_nA = acc_fsi_fea_D[nA];
Real3 acc_fsi_fea_D_nB = acc_fsi_fea_D[nB];
Real3 dist3 = mR3(sortedPosRad[i_idx]) - pos_fsi_fea_D_nA;
Real3 x_dir = (pos_fsi_fea_D_nB - pos_fsi_fea_D_nA);
Real Cable_x = length(x_dir);
x_dir = x_dir / length(x_dir);
Real dx = dot(dist3, x_dir);
Real2 N_cable = Cables_ShapeFunctions(dx / Cable_x);
Real NA = N_cable.x;
Real NB = N_cable.y;
V_prescribed = NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB;
myAcc = NA * acc_fsi_fea_D_nA + NB * acc_fsi_fea_D_nB;
}
if (FlexIndex >= numFlex1D) {
int nA = ShellelementsNodes[FlexIndex - numFlex1D].x;
int nB = ShellelementsNodes[FlexIndex - numFlex1D].y;
int nC = ShellelementsNodes[FlexIndex - numFlex1D].z;
int nD = ShellelementsNodes[FlexIndex - numFlex1D].w;
Real3 pos_fsi_fea_D_nA = pos_fsi_fea_D[nA];
Real3 pos_fsi_fea_D_nB = pos_fsi_fea_D[nB];
Real3 pos_fsi_fea_D_nC = pos_fsi_fea_D[nC];
Real3 pos_fsi_fea_D_nD = pos_fsi_fea_D[nD];
Real3 vel_fsi_fea_D_nA = vel_fsi_fea_D[nA];
Real3 vel_fsi_fea_D_nB = vel_fsi_fea_D[nB];
Real3 vel_fsi_fea_D_nC = vel_fsi_fea_D[nC];
Real3 vel_fsi_fea_D_nD = vel_fsi_fea_D[nD];
Real3 acc_fsi_fea_D_nA = acc_fsi_fea_D[nA];
Real3 acc_fsi_fea_D_nB = acc_fsi_fea_D[nB];
Real3 acc_fsi_fea_D_nC = acc_fsi_fea_D[nC];
Real3 acc_fsi_fea_D_nD = acc_fsi_fea_D[nD];
Real3 Shell_center = 0.25 * (pos_fsi_fea_D_nA + pos_fsi_fea_D_nB + pos_fsi_fea_D_nC + pos_fsi_fea_D_nD);
// Note that this must be the i_idx itself not the Original_idx
Real3 dist3 = mR3(sortedPosRad[i_idx]) - Shell_center;
Real Shell_x =
0.25 * (length(pos_fsi_fea_D_nB - pos_fsi_fea_D_nA) + length(pos_fsi_fea_D_nC - pos_fsi_fea_D_nD));
Real Shell_y =
0.25 * (length(pos_fsi_fea_D_nD - pos_fsi_fea_D_nA) + length(pos_fsi_fea_D_nC - pos_fsi_fea_D_nB));
Real2 FlexSPH_MeshPos_Natural = mR2(dist3.x / Shell_x, dist3.y / Shell_y);
Real4 N_shell = Shells_ShapeFunctions(FlexSPH_MeshPos_Natural.x, FlexSPH_MeshPos_Natural.y);
Real NA = N_shell.x;
Real NB = N_shell.y;
Real NC = N_shell.z;
Real ND = N_shell.w;
V_prescribed =
NA * vel_fsi_fea_D_nA + NB * vel_fsi_fea_D_nB + NC * vel_fsi_fea_D_nC + ND * vel_fsi_fea_D_nD;
myAcc = NA * acc_fsi_fea_D_nA + NB * acc_fsi_fea_D_nB + NC * acc_fsi_fea_D_nC + ND * acc_fsi_fea_D_nD;
}
} else {
printf("i_idx=%d, Original_idx:%d was not found \n\n", i_idx, Original_idx);
}
}
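// Summary of the updatePortion ranges handled in BCE_Vel_Acc (as implied by the branches above):
//   [updatePortion.x, updatePortion.y) : fixed boundary markers (zero acceleration, optional user-prescribed velocity)
//   [updatePortion.y, updatePortion.z) : rigid-body BCE markers (velocity/acceleration from rigid-body kinematics)
//   [updatePortion.z, updatePortion.w) : flexible FEA markers (1D cable or 2D shell shape-function interpolation)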
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calc_A_tensor(Real* A_tensor,
Real* G_tensor,
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real* sumWij_inv,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD);
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calc_L_tensor(Real* A_tensor,
Real* L_tensor,
Real* G_tensor,
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real* sumWij_inv,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD);
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calcRho_kernel(Real4* sortedPosRad,  // input: sorted positions
Real4* sortedRhoPreMu,
Real* sumWij_inv,
uint* cellStart,
uint* cellEnd,
uint* mynumContact,
const size_t numAllMarkers,
volatile bool* isErrorD);
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calcNormalizedRho_kernel(Real4* sortedPosRad, // input: sorted positions
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* sumWij_inv,
Real* G_i,
Real3* normals,
Real* Color,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD);
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calcNormalizedRho_Gi_fillInMatrixIndices(Real4* sortedPosRad, // input: sorted positions
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* sumWij_inv,
Real* G_i,
Real3* normals,
uint* csrColInd,
uint* numContacts,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD);
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Function_Gradient_Laplacian_Operator(Real4* sortedPosRad, // input: sorted positions
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* sumWij_inv,
Real* G_tensor,
Real* L_tensor,
Real* A_L, // velocity Laplacian matrix;
Real3* A_G, // This is a matrix in a way that A*p gives the gradp
Real* A_f,
uint* csrColInd,
uint* numContacts,
const size_t numAllMarkers,
volatile bool* isErrorD);
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Jacobi_SOR_Iter(Real4* sortedRhoPreMu,
Real* A_Matrix,
Real3* V_old,
Real3* V_new,
Real3* b3vec,
Real* q_old, // q=p^(n+1)-p^n
Real* q_new, // q=p^(n+1)-p^n
Real* b1vec,
const uint* csrColInd,
const uint* numContacts,
size_t numAllMarkers,
bool _3dvector,
volatile bool* isErrorD);
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Update_AND_Calc_Res(Real4* sortedRhoPreMu,
Real3* V_old,
Real3* V_new,
Real* q_old,
Real* q_new,
Real* Residuals,
const size_t numAllMarkers,
bool _3dvector,
volatile bool* isErrorD);
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Initialize_Variables(Real4* sortedRhoPreMu,
Real* p_old,
Real3* sortedVelMas,
Real3* V_new,
const size_t numAllMarkers,
volatile bool* isErrorD);
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void UpdateDensity(Real3* vis_vel,
Real3* XSPH_Vel,
Real3* new_vel, // Write
Real4* sortedPosRad, // Read
Real4* sortedRhoPreMu,
Real* sumWij_inv,
uint* cellStart,
uint* cellEnd,
size_t numAllMarkers,
volatile bool* isErrorD);
} // namespace fsi
} // namespace chrono
#endif
#include <curand.h>
#include <curand_kernel.h>
#include <torch/extension.h>
typedef torch::PackedTensorAccessor32<int32_t, 1, torch::RestrictPtrTraits> int32_accessor_1d;
typedef torch::PackedTensorAccessor32<int32_t, 3, torch::RestrictPtrTraits> int32_accessor_3d;
typedef torch::PackedTensorAccessor32<int32_t, 4, torch::RestrictPtrTraits> int32_accessor_4d;
typedef torch::PackedTensorAccessor32<int64_t, 3, torch::RestrictPtrTraits> int64_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> float_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor_4d;
/**
* Compute hamming distances
*/
__device__
int hamming_distance(int64_t a, int64_t b) {
return __popcll(a ^ b);
}
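// Example: hamming_distance(0b1010, 0b0110) == 2, since 0b1010 ^ 0b0110 == 0b1100
// and __popcll counts the two set bits.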
/**
* Set up the kernel to generate cuda random numbers
*/
__global__
void setup_kernel(curandState *state) {
int idx = threadIdx.x+blockDim.x*blockIdx.x;
curand_init(1234, idx, 0, &state[idx]);
}
/**
* This kernel assigns datapoints to the closest centroids based on the hamming
* distance
*
* Arguments:
* ---------
* Inputs:
* hash_codes : hash codes tensor to be clustered
* lengths : sequence lengths array
* centroids : current estimate of the centroids
* n_blocks_per_sequence : number of blocks allocated per sequence
 * MAX : initial best distance; any real Hamming distance (<= 64 for 64-bit codes) is smaller
*
* Outputs:
* labels : labels to be assigned to each data point
* distances : distances to the closest cluster
*/
__global__
void assign_clusters_kernel(
const int64_accessor_3d hash_codes,
const int32_accessor_1d lengths,
const int64_accessor_3d centroids,
int32_accessor_3d labels,
int32_accessor_3d distances,
const int n_blocks_per_sequence,
int MAX=65
) {
int H = centroids.size(1);
int L = hash_codes.size(2);
int K = centroids.size(2);
// Load the shared memory
const int sequence_index = blockIdx.x / n_blocks_per_sequence;
const int n = sequence_index / H;
const int h = sequence_index % H;
extern __shared__ int64_t shared_means[];
if (threadIdx.x < K) {
shared_means[threadIdx.x] = centroids[n][h][threadIdx.x];
}
__syncthreads();
// Extract the indexes
const int l = ((blockIdx.x % n_blocks_per_sequence)*blockDim.x) + threadIdx.x;
// Each block is only responsible for one sequence
if(l >= L) {
return;
}
    // Beyond the sequence length, set the cluster label to (K+1), where K is the number of clusters
if(l >= lengths[n]) {
labels[n][h][l] = K+1;
distances[n][h][l] = -1;
return;
}
// Make global loads once.
const int64_t x = hash_codes[n][h][l];
    // Update the cluster assignments.
    // 64-bit hash codes have a maximum Hamming distance of 64.
int best_distance = MAX;
int best_cluster = 0;
int dist = 0;
for (int cluster = 0; cluster < K; ++cluster) {
dist = hamming_distance(x, shared_means[cluster]);
if (dist < best_distance) {
best_distance = dist;
best_cluster = cluster;
}
}
labels[n][h][l] = best_cluster;
distances[n][h][l] = best_distance;
}
/**
 * This kernel counts the number of data points belonging to each cluster and
 * also updates cluster_bit_counts. For each cluster, cluster_bit_counts is an
 * array of size B; position i stores the difference between the number of
 * assigned data points whose i-th bit is one and the number whose i-th bit
 * is zero.
*
* Arguments:
* ---------
* Inputs:
* labels : labels to be assigned to each data point
* hash_codes : hash codes to be clustered
*
* Outputs:
* counts : array to store the number of datapoints
* belonging to any cluster
* cluster_bit_counts : array containing the bit counts
*/
__global__
void bit_count_kernel(
const int32_accessor_3d labels,
const int64_accessor_3d hash_codes,
int32_accessor_3d counts,
int32_accessor_4d cluster_bit_counts
) {
const int N = labels.size(0);
const int H = labels.size(1);
const int L = labels.size(2);
const int K = counts.size(2);
const int B = cluster_bit_counts.size(3);
const int hl = H*L;
// Extract the indices
int full_idx = (blockDim.x * blockIdx.x) + threadIdx.x;
const int sequence_index = full_idx / L;
const int n = sequence_index / H;
const int h = sequence_index % H;
const int l = full_idx - n*hl - h*L;
if (n >= N)
return;
const int64_t x = hash_codes[n][h][l];
int val_to_add = -1;
const int best_cluster = labels[n][h][l];
if(best_cluster == (K+1)) {
return;
}
for (int i=0; i<B; i++) {
int64_t bit= 1L << i;
if((x & bit) > 0) {
val_to_add = 1;
}
else {
val_to_add = -1;
}
atomicAdd(&cluster_bit_counts[n][h][best_cluster][i], val_to_add);
}
atomicAdd(&counts[n][h][best_cluster], 1);
}
/**
* This kernel computes the new means based on the cluster_bit_counts
* Arguments:
* ---------
* Inputs:
 * state : CUDA random state used for random number generation
* counts : array to store the number of datapoints
* belonging to any cluster
* Outputs:
* centroids : centroids to be updated
* cluster_bit_counts : array containing the bit counts
*/
__global__
void compute_means_kernel(
const int32_accessor_3d counts,
int32_accessor_4d cluster_bit_counts,
int64_accessor_3d centroids,
curandState* state
) {
const int N = counts.size(0);
const int H = counts.size(1);
const int K = counts.size(2);
const int B = cluster_bit_counts.size(3);
// Extract indices
const int full_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if( full_idx >= (K*N*H))
return;
const int sequence_idx = full_idx / K;
const int n = sequence_idx / H;
const int h = sequence_idx % H;
const int k = full_idx % K;
int64_t mean_k = 0;
const int64_t MAX = (1L << (B));
    // if the count for the current cluster is 0, set the mean to a random code
if(counts[n][h][k] == 0) {
centroids[n][h][k] = int64_t(curand(state + k) % MAX);
return;
}
//update otherwise
for( int i=0; i<B; i++) {
if(cluster_bit_counts[n][h][k][i] == 0) {
cluster_bit_counts[n][h][k][i] =
(curand(state + k) & 1L);
}
if(cluster_bit_counts[n][h][k][i] > 0) {
mean_k = mean_k | (1L << i);
}
}
centroids[n][h][k] = mean_k;
}
/**
 * K-means runs Lloyd's algorithm iteratively: first assign each point to the
 * closest centroid, then update the centroids.
* Arguments:
* ---------
* Inputs:
* hash_codes : the hash codes to be clustered
* lengths : sequence lengths array
* centroids : centroid buffer
* distances : distances buffer
* cluster_bit_counts : bit counts buffer
* iterations : number of iterations of Lloyd
*
* Outputs:
* labels : array to store the labels assigned to each point
 * counts : array to store the number of datapoints belonging
 * to each cluster
 * Size (N*H*K)
 * [0..K-1] are counts for the 1st sequence,
 * [K..2K-1] are counts for the 2nd sequence, etc.
*/
void kmeans(
const torch::Tensor hash_codes,
const torch::Tensor lengths,
torch::Tensor centroids,
torch::Tensor distances,
torch::Tensor cluster_bit_counts,
torch::Tensor labels,
torch::Tensor counts,
int iterations
) {
// Make sure that we are using the correct GPU device
torch::DeviceGuard _guard(hash_codes.device());
const int64_accessor_3d hash_codes_acc = hash_codes.packed_accessor32<int64_t, 3, torch::RestrictPtrTraits>();
const int32_accessor_1d lengths_acc = lengths.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>();
int64_accessor_3d centroids_acc = centroids.packed_accessor32<int64_t, 3, torch::RestrictPtrTraits>();
int32_accessor_3d distances_acc = distances.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>();
int32_accessor_4d cluster_bit_counts_acc = cluster_bit_counts.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>();
int32_accessor_3d labels_acc = labels.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>();
int32_accessor_3d counts_acc = counts.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>();
const int N = hash_codes.size(0);
const int H = hash_codes.size(1);
const int NH = N*H;
const int L = hash_codes.size(2);
const int K = centroids.size(2);
const int B = cluster_bit_counts.size(3);
// allocate the temporary arrays we will need
curandState *d_state;
    // setup_kernel launches K threads, each initializing its own state
    cudaMalloc(&d_state, K * sizeof(curandState));
setup_kernel<<<1,K>>>(d_state);
// Estimate the number of threads we will need
const int n_blocks_per_sequence = (L-1)/1024 + 1;
// Dividing the number of threads roughly equally among blocks
    // Max because each block needs at least K threads to load shared memory
const int n_threads_assign = max((L-1)/n_blocks_per_sequence + 1, K);
const int n_blocks_assign = NH * n_blocks_per_sequence;
const int shared_mem_assign = K * sizeof(int64_t);
const int n_threads_cnt = 1024;
const int n_blocks_cnt = ((L*NH)-1)/n_threads_cnt + 1;
const int n_threads_centroids = 1024;
    const int n_blocks_centroids = ((K*NH)-1)/n_threads_centroids + 1;
//Lloyd iterations
for (size_t itr = 0; itr < iterations; ++itr) {
assign_clusters_kernel<<<n_blocks_assign,
n_threads_assign,
shared_mem_assign>>>(
hash_codes_acc,
lengths_acc,
centroids_acc,
labels_acc,
distances_acc,
n_blocks_per_sequence
);
counts.zero_();
cluster_bit_counts.zero_();
bit_count_kernel<<<n_blocks_cnt,
n_threads_cnt>>>(
labels_acc,
hash_codes_acc,
counts_acc,
cluster_bit_counts_acc
);
compute_means_kernel<<<n_blocks_centroids,
n_threads_centroids>>>(
counts_acc,
cluster_bit_counts_acc,
centroids_acc,
d_state
);
}
assign_clusters_kernel<<<n_blocks_assign,
n_threads_assign,
shared_mem_assign>>>(
hash_codes_acc,
lengths_acc,
centroids_acc,
labels_acc,
distances_acc,
n_blocks_per_sequence
);
counts.zero_();
cluster_bit_counts.zero_();
bit_count_kernel<<<n_blocks_cnt,
n_threads_cnt>>>(
labels_acc,
hash_codes_acc,
counts_acc,
cluster_bit_counts_acc
);
cudaFree(d_state);
return;
}
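// Shape sketch for the arguments of kmeans() above (derived from the accessor indexing in the
// kernels; the literal sizes and the call below are illustrative assumptions):
//   hash_codes         : int64  [N, H, L]   one 64-bit code per (sequence, head, position)
//   lengths            : int32  [N]         valid length of each sequence
//   centroids          : int64  [N, H, K]   K centroids per (sequence, head)
//   labels, distances  : int32  [N, H, L]
//   counts             : int32  [N, H, K]
//   cluster_bit_counts : int32  [N, H, K, B]
// e.g. kmeans(hash_codes, lengths, centroids, distances, cluster_bit_counts,
//             labels, counts, /*iterations=*/10);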
/**
 * Cluster the hash codes using Lloyd's K-Means clustering
 *
* Arguments:
* ---------
* Inputs:
* hashes : hashes to be clustered
*
* Buffers:
* centroids : centroids buffer
* distances : distances buffer
* bitcounts : cluster_bit_counts buffer
*
* Outputs:
* clusters : Store the groups/labels/assignments
* counts : Store the counts of the number of points in each cluster
*/
void cluster(
const torch::Tensor hashes,
const torch::Tensor lengths,
torch::Tensor centroids,
torch::Tensor distances,
torch::Tensor bitcounts,
torch::Tensor clusters,
torch::Tensor counts,
int n_iterations,
int B
) {
int K = centroids.size(2);
int N = hashes.size(0);
int H = hashes.size(1);
int L = hashes.size(2);
// initialize the centroids
//centroids.view({-1, K}) = hashes.view({-1, L}).narrow(1, 0, K);
kmeans(
hashes,
lengths,
centroids,
distances,
bitcounts,
clusters,
counts,
n_iterations
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("cluster", &cluster, "Cluster the hashed vectors by "
"performing a few iterations of k-means");
}
#include <array/NDArrayFactory.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/MmulHelper.h>
#include <helpers/ShapeUtils.h>
#include <ops/declarable/helpers/top_k.h>
//#include <ops/declarable/generic/helpers/BroadcastHelper.h>
#include <cusolverDn.h>
#include <exceptions/cuda_exception.h>
namespace sd {
namespace ops {
namespace helpers {
// ------------------------------------------------------------------------------------------------------------------ //
// invert the second diagonal for a lower-triangular matrix
template <typename T>
static SD_KERNEL void invertKernelLow(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
auto inverted = reinterpret_cast<T *>(invertedBuf);
auto input = reinterpret_cast<const T *>(inputBuf);
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = start + 1; i < n; i += step) {
sd::LongType pos[] = {i, i - 1};
sd::LongType posX[] = {i, i};
sd::LongType posY[] = {i - 1, i - 1};
auto xIndex = shape::getOffset(inputShape, pos);
auto dxIndex = shape::getOffset(inputShape, posX);
auto dyIndex = shape::getOffset(inputShape, posY);
auto zIndex = shape::getOffset(invertedShape, pos);
// invert lower triangular matrix
inverted[zIndex] = -input[xIndex] / (input[dxIndex] * input[dyIndex]);
// math::atomics::sd_atomicAdd(&inverted[zIndex], - input[xIndex] * inverted[iIndex] / input[dIndex]);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// invert the main-diagonal values of the matrix
template <typename T>
static SD_KERNEL void upvertKernel(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
auto inverted = reinterpret_cast<T *>(invertedBuf);
auto input = reinterpret_cast<const T *>(inputBuf);
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < n; i += step) {
sd::LongType pos[] = {i, i};
auto xIndex = shape::getOffset(inputShape, pos);
auto zIndex = shape::getOffset(invertedShape, pos);
// invert diagonal elements
inverted[zIndex] /= input[xIndex];
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// invert upper second diagonal
template <typename T>
static SD_KERNEL void upvertKernelUp(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
__shared__ T *inverted;
__shared__ const T *input;
if (threadIdx.x == 0) {
inverted = reinterpret_cast<T *>(invertedBuf);
input = reinterpret_cast<const T *>(inputBuf);
}
__syncthreads();
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i < n - 1; i += step) {
sd::LongType pos[] = {i, i + 1};
sd::LongType posX[] = {i + 1, i + 1};
auto xIndex = shape::getOffset(inputShape, pos);
auto iIndex = shape::getOffset(invertedShape, posX);
auto zIndex = shape::getOffset(invertedShape, pos);
// invert upper matrix
math::atomics::sd_atomicAdd(&inverted[zIndex], -input[xIndex] * inverted[iIndex]); // / input[yIndex]);
// inputMatrix->t<T>(i, i + 1) * invertedMatrix->t<T>(i + 1, i + 1) / inputMatrix->t<T>(i, i)
}
}
// ------------------------------------------------------------------------------------------------------------------ //
template <typename T>
static SD_KERNEL void invertLowKernel(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
auto input = reinterpret_cast<const T *>(inputBuf);
auto inverted = reinterpret_cast<T *>(invertedBuf);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (int i = tid + 2; i < n; i += step) {
for (int j = i - 2; j >= 0; --j)
for (int k = 0; k < i; k++) {
sd::LongType posZ[] = {i, j};
sd::LongType posY[] = {k, j};
sd::LongType posX[] = {i, k};
sd::LongType posD[] = {i, i};
auto xIndex = shape::getOffset(inputShape, posX);
auto yIndex = shape::getOffset(invertedShape, posY);
auto dIndex = shape::getOffset(inputShape, posD);
auto zIndex = shape::getOffset(invertedShape, posZ);
// invert non-diagonal elements
math::atomics::sd_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex] / input[dIndex]);
}
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// Inversion of the non-diagonal elements of an upper-triangular matrix, once the main and second diagonals have been processed
template <typename T>
static SD_KERNEL void invertUpKernel(void *invertedBuf, const sd::LongType *invertedShape, const void *inputBuf,
const sd::LongType *inputShape, sd::LongType n) {
  auto inverted = reinterpret_cast<T *>(invertedBuf);
auto input = reinterpret_cast<const T *>(inputBuf);
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = (int)n - tid - 2; i >= 0; i -= step) {
for (int j = i + 2; j < (int)n; j++)
for (int k = i; k < (int)n; k++) {
sd::LongType posZ[] = {i, j};
sd::LongType posY[] = {k, j};
sd::LongType posX[] = {i, k};
        // inversion with the Gauss-Jordan transformation
auto xIndex = shape::getOffset(inputShape, posX);
auto yIndex = shape::getOffset(invertedShape, posY);
auto zIndex = shape::getOffset(invertedShape, posZ);
// invert upper non-diagonal elements
math::atomics::sd_atomicAdd(&inverted[zIndex], -inverted[yIndex] * input[xIndex]);
}
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// procedure to invert a lower-triangular matrix.
// In this case the lower-triangular matrix has a general (non-unit) main diagonal.
//
template <typename T>
static void invertLowerMatrix_(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
int n = inputMatrix->rows();
invertedMatrix->setIdentity();
if (inputMatrix->isIdentityMatrix()) return;
auto stream = context->getCudaStream();
// invert lower matrix
// invert main diagonal
upvertKernel<T><<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
// invert the second diagonal
invertKernelLow<T><<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
// invert non-diagonal elements
invertLowKernel<T><<<n, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
}
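// The three launches above build L^{-1} in stages: upvertKernel writes the reciprocal of each
// diagonal entry, invertKernelLow fills the first sub-diagonal with -l(i,i-1) / (l(i,i) * l(i-1,i-1)),
// and invertLowKernel accumulates the remaining entries below the diagonal with atomic adds.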
// ------------------------------------------------------------------------------------------------------------------ //
// caller for invert lower matrix routine
void invertLowerMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix});
BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), invertLowerMatrix_, (context, inputMatrix, invertedMatrix),
SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({invertedMatrix}, {inputMatrix});
}
// ------------------------------------------------------------------------------------------------------------------ //
// procedure to invert an upper-triangular matrix.
// In this case the upper-triangular matrix has a unit main diagonal (all ones).
template <typename T>
static void invertUpperMatrix_(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
int n = inputMatrix->rows();
invertedMatrix->setIdentity();
auto stream = context->getCudaStream();
if (inputMatrix->isIdentityMatrix()) { // the inverse for I is I
return;
}
// invert upper matrix
// invert the second diagonal
upvertKernelUp<T><<<1, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
// invert other elements
invertUpKernel<T><<<n, n, 512, *stream>>>(invertedMatrix->specialBuffer(), invertedMatrix->specialShapeInfo(),
inputMatrix->specialBuffer(), inputMatrix->specialShapeInfo(), n);
}
// ------------------------------------------------------------------------------------------------------------------ //
// inversion of an upper-triangular matrix - runner routine
void invertUpperMatrix(LaunchContext *context, NDArray *inputMatrix, NDArray *invertedMatrix) {
NDArray::prepareSpecialUse({invertedMatrix}, {inputMatrix});
BUILD_SINGLE_SELECTOR(invertedMatrix->dataType(), invertUpperMatrix_, (context, inputMatrix, invertedMatrix),
SD_FLOAT_NATIVE);
  NDArray::registerSpecialUse({invertedMatrix}, {inputMatrix});
}
// ------------------------------------------------------------------------------------------------------------------ //
// determinant kernel - accumulates the product of all values on the main diagonal
template <typename T>
static SD_KERNEL void determinantKernel(T *compound, T *result, sd::LongType len) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < len; i += step) {
auto pos = i * len + i; // shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2);
// multiply all diagonal elements
math::atomics::sd_atomicMul(&result[0], compound[pos]);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// determinant logarithm - accumulates the sum of the logarithms of the absolute values on the main diagonal
// (all values entering the logarithm must be positive)
template <typename T>
static SD_KERNEL void determinantLogKernel(T *compound, T *result, sd::LongType len) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < len; i += step) {
auto pos = i * len + i; // shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2);
// sum logs of all diagonal elements
math::atomics::sd_atomicAdd(result, math::sd_log<T, T>(math::sd_abs(compound[pos])));
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// kernel to extract the n x n matrix located at offset `pos` of a compound tensor into a dense matrix buffer
// input - an N-D tensor buffer with rank not less than 2; output - a 2D square n x n matrix with n = rowLen
template <typename T, typename F>
static SD_KERNEL void fillMatrix(void *output, const sd::LongType *outShape, const void *input,
const sd::LongType *inputShape, sd::LongType pos, sd::LongType rowLen) {
__shared__ F *matrix;
__shared__ const T *inputBuf;
__shared__ sd::LongType inputLen;
__shared__ sd::LongType n2;
if (threadIdx.x == 0) {
matrix = reinterpret_cast<F *>(output);
inputBuf = reinterpret_cast<const T *>(input);
inputLen = shape::length(inputShape);
n2 = rowLen * rowLen;
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int k = pos + start, j = start; j < n2; k += step, j += step) {
auto xIndex = shape::getIndexOffset(k, inputShape);
matrix[j] = (F)inputBuf[xIndex];
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// reverse of fillMatrix: writes the dense n x n matrix back into the N-D output tensor at offset `pos` (no type conversion)
template <typename T>
static SD_KERNEL void returnMatrix(void *output, const sd::LongType *outputShape, const void *input,
const sd::LongType *inputShape, sd::LongType pos, sd::LongType rowLen) {
__shared__ sd::LongType outputLen;
__shared__ sd::LongType n2;
auto matrix = reinterpret_cast<const T *>(input);
auto outputBuf = reinterpret_cast<T *>(output);
if (threadIdx.x == 0) {
outputLen = shape::length(inputShape);
n2 = rowLen * rowLen;
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int k = pos + start, j = start; j < n2; k += step, j += step) {
auto zIndex = shape::getIndexOffset(k, outputShape);
outputBuf[zIndex] = matrix[j];
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// fill up permutation matrix kernel. The permutation matrix is filled with zeros and ones
template <typename F>
static SD_KERNEL void fillUpPermutation(void *output, const sd::LongType *shape, int *source, int rowNum) {
F *permutation = reinterpret_cast<F *>(output);
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < rowNum; i += step) {
int val = source[i] - 1;
sd::LongType posF[] = {i, val};
auto pos = shape::getOffset(shape, posF);
permutation[pos] = F(1.f);
}
}
// ------------------------------------------------------------------------------------------------------------------ //
// LUP decomposition runner - uses cuSOLVER
// if a permutation is given, LUP decomposition is used; otherwise plain LU decomposition
// L - lower triangular, U - upper triangular, P - permutation matrices
// PA = LU
//
// input - A, an n x n matrix
// compound - C = L + U - I: the main diagonal and below hold L, the part above the main diagonal holds U
template <typename T, typename I>
static void lup_(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) {
auto stream = context->getCudaStream();
auto n = input->rows();
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
cusolverDnHandle_t *cusolverH = (cusolverDnHandle_t *)context->getCusolverHandle(); // nullptr;
// create solver handle
cusolverStatus_t status; // cusolverDnCreate(&cusolverH);
// if (CUSOLVER_STATUS_SUCCESS != status) {
// throw cuda_exception::build("Cannot create cuSolver handle", status);
// }
// set solver stream
status = cusolverDnSetStream(*cusolverH, *stream);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("Cannot set up stream for cuda solver", status);
}
int lwork = 0;
int *d_info = nullptr;
// allocate memory for permutation vector
auto err = cudaMalloc((void **)&d_info, sizeof(int));
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver info buffer", err);
}
DataType dtype = input->dataType();
switch (dtype) { // there are two implementations with cublas for LUP decomposition - double and float
case DataType::DOUBLE: {
double *d_work = nullptr;
// compute internal buffer size
double *matrix = reinterpret_cast<double *>(input->specialBuffer());
status = cusolverDnDgetrf_bufferSize(*cusolverH, n, n, matrix, n, &lwork);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status);
}
      err = cudaMalloc((void **)&d_work, sizeof(double) * lwork);  // workspace for the double-precision solver
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err);
}
if (permutation == nullptr) {
status = cusolverDnDgetrf(*cusolverH, n, n, matrix, n, d_work, nullptr, d_info);
if (status != CUSOLVER_STATUS_SUCCESS) {
throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status);
}
} else {
NDArray permutVector('c', {n}, sd::DataType::INT32, context);
int *permutationBuf = permutVector.dataBuffer()->specialAsT<int>();
status = cusolverDnDgetrf(*cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info);
if (status != CUSOLVER_STATUS_SUCCESS) {
throw cuda_exception::build("helpers::lup_: LU factorization is failed due ", status);
}
if (permutation->rankOf() == 2) {
fillUpPermutation<double><<<n, n, 1024, *stream>>>(permutation->specialBuffer(),
permutation->specialShapeInfo(), permutationBuf, n);
} else {
permutVector.tickWriteDevice();
input->tickWriteDevice();
compound->assign(input);
permutation->assign(permutVector);
}
}
err = cudaFree(d_work);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err);
}
} break;
case DataType::FLOAT32: {
float *matrix = reinterpret_cast<float *>(input->specialBuffer());
float *d_work = nullptr;
status = cusolverDnSgetrf_bufferSize(*cusolverH, n, n, matrix, n, &lwork);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::lup_: Cannot create cuSolver handle", status);
}
err = cudaMalloc((void **)&d_work, sizeof(float) * lwork);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot allocate memory for solver data buffer", err);
}
if (permutation == nullptr)
status = cusolverDnSgetrf(*cusolverH, n, n, matrix, n, d_work, nullptr, d_info);
else {
NDArray permutVector('c', {n}, DataType::INT32, context);
int *permutationBuf = reinterpret_cast<int *>(permutVector.specialBuffer());
status = cusolverDnSgetrf(*cusolverH, n, n, matrix, n, d_work, permutationBuf, d_info);
if (permutation->rankOf() == 2) {
fillUpPermutation<I><<<n, n, 128, *stream>>>(permutation->specialBuffer(), permutation->specialShapeInfo(),
permutationBuf, n);
permutation->tickWriteDevice();
} else {
input->tickWriteDevice();
compound->assign(input);
permutation->assign(permutVector);
}
}
err = cudaFree(d_work);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver data buffer", err);
}
}
}
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::lup_: Cannot make LU decomposition", status);
}
err = cudaFree(d_info);
if (err) {
throw cuda_exception::build("helpers::lup_: Cannot deallocate memory for solver info buffer", err);
}
// cusolverDnDestroy(cusolverH);
// NDArray::registerSpecialUse({input}, {input});
input->tickWriteDevice();
}
// ------------------------------------------------------------------------------------------------------------------ //
BUILD_DOUBLE_TEMPLATE(template void lup_,
(LaunchContext * context, NDArray *input, NDArray *output, NDArray *permutation), SD_FLOAT_NATIVE,
SD_INDEXING_TYPES);
template <typename T>
static SD_DEVICE void swapRows(T *matrix, const sd::LongType *shape, sd::LongType theFirst, sd::LongType theSecond,
sd::LongType n) {
if (theFirst != theSecond) {
for (auto i = 0; i < n; i++) {
sd::LongType theFirstPos[] = {theFirst, i};
sd::LongType theSecondPos[] = {theSecond, i};
auto theFirstIndex = shape::getOffset(shape, theFirstPos, 0);
auto theSecondIndex = shape::getOffset(shape, theSecondPos, 0);
math::sd_swap(matrix[theFirstIndex], matrix[theSecondIndex]);
}
}
}
template <typename T>
static SD_DEVICE void processColumns(sd::LongType currentRow, sd::LongType rowNum, T *compoundBuf,
const sd::LongType *compoundShape) {
sd::LongType xDiag[] = {currentRow, currentRow};
auto diagIndex = shape::getOffset(compoundShape, xDiag, 0);
for (auto j = currentRow + 1; j < rowNum; j++) {
sd::LongType xRow[] = {j, currentRow};
auto rowIndex = shape::getOffset(compoundShape, xRow, 0);
compoundBuf[rowIndex] /= compoundBuf[diagIndex]; // output->t<T>(i, i);
for (auto k = currentRow + 1; k < rowNum; k++) {
sd::LongType yRow[] = {j, k};
sd::LongType yCol[] = {currentRow, k};
auto rowIndexY = shape::getOffset(compoundShape, yRow, 0);
auto colIndex = shape::getOffset(compoundShape, yCol, 0);
compoundBuf[rowIndexY] -= compoundBuf[rowIndex] * compoundBuf[colIndex];
}
}
}
template <typename T>
SD_DEVICE sd::LongType argmaxCol(sd::LongType column, T *compoundBuffer, const sd::LongType *compoundShape) {
auto rowNum = shape::sizeAt(compoundShape, 0);
sd::LongType xInitial[] = {column, column};
auto xInitialIndex = shape::getOffset(compoundShape, xInitial, 0);
auto maxValue = T(0); // sd::math::sd_abs(compoundBuffer[xInitialIndex]);
auto result = -1LL;
for (auto rowCounter = column; rowCounter < rowNum; rowCounter++) {
sd::LongType xPos[] = {rowCounter, column};
auto xIndex = shape::getOffset(compoundShape, xPos, 0);
if (sd::math::sd_abs(compoundBuffer[xIndex]) > maxValue) {
maxValue = sd::math::sd_max(maxValue, sd::math::sd_abs(compoundBuffer[xIndex]));
result = rowCounter;
}
}
return result;
}
template <typename T, typename I>
static SD_DEVICE int luNN(T *matrix, const sd::LongType *shape, I *permutation, const sd::LongType *permuShape,
sd::LongType n) {
for (auto i = 0; i < n - 1; i++) {
auto pivotIndex = argmaxCol(i, matrix, shape);
if (pivotIndex < 0) {
return -1; // throw std::runtime_error("helpers::luNN_: input matrix is singular.");
}
math::sd_swap(permutation[shape::getIndexOffset(i, permuShape)],
permutation[shape::getIndexOffset(pivotIndex, permuShape)]);
swapRows(matrix, shape, (sd::LongType)i, pivotIndex, n);
processColumns(i, n, matrix, shape);
}
return 0;
}
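// luNN performs an in-place LU factorization with partial pivoting: for each column i it picks the
// row with the largest absolute pivot (argmaxCol), records the swap in `permutation`, swaps the rows
// (swapRows), and eliminates the entries below the pivot (processColumns). The result stores the L
// multipliers strictly below the diagonal (unit diagonal implied) and U on and above it.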
template <typename T, typename I>
static SD_KERNEL void luBatchedKernel(T *outputBuf, const sd::LongType *outputShape, I *permutations,
const sd::LongType *permuShape, const sd::LongType *outputTadShape,
const sd::LongType *outputTadOffsets, const sd::LongType *permuTadShape,
const sd::LongType *permuTadOffsets, sd::LongType batchNum) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto b = start; b < batchNum; b += step) {
T *matrix = outputBuf + outputTadOffsets[b];
I *permutation = permutations + permuTadOffsets[b];
if (0 != luNN(matrix, outputTadShape, permutation, permuTadShape, shape::length(permuTadShape))) break;
}
}
template <typename T, typename I>
static void lu_(LaunchContext *context, NDArray *input, NDArray *output, NDArray *permutationVectors) {
auto n = input->sizeAt(-1);
auto stream = context->getCudaStream();
NDArray iota('c', {n}, permutationVectors->dataType(), context); // = NDArrayFactory::create(); // <int>('c', {n});
iota.linspace(0);
iota.syncToDevice();
  output->assign(input);  // copy the input into the output; the LU factorization is computed in place on the output
// output->tickWriteDevice();
permutationVectors->applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), iota, *permutationVectors, true, nullptr);
// permutationVectors->tickWriteDevice();
auto tads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1});
auto permutaionTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-1});
auto batchNum = tads.numberOfTads();
luBatchedKernel<T, I><<<batchNum, 256, 1024, *stream>>>(
reinterpret_cast<T *>(output->platformBuffer()), output->specialShapeInfo(),
reinterpret_cast<I *>(permutationVectors->platformBuffer()), permutationVectors->specialShapeInfo(),
tads.specialShapeInfo(), tads.specialOffsets(), permutaionTads.specialShapeInfo(),
permutaionTads.specialOffsets(), batchNum);
}
void lu(LaunchContext *context, NDArray *input, NDArray *output, NDArray *permutations) {
NDArray::prepareSpecialUse({output, permutations}, {input});
BUILD_DOUBLE_SELECTOR(input->dataType(), permutations->dataType(), lu_, (context, input, output, permutations),
SD_FLOAT_NATIVE, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output, permutations}, {input});
}
// ------------------------------------------------------------------------------------------------------------------ //
template <typename T>
static sd::Status determinant_(sd::LaunchContext *context, NDArray *input, NDArray *output) {
sd::LongType n = input->sizeAt(-1);
sd::LongType n2 = n * n;
  std::vector<int> dims;
auto packX =
ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {input->rankOf() - 2, input->rankOf() - 1});
// auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {output->rankOf() - 1});
// DataType dtype = input->dataType();
// if (dtype != DataType::DOUBLE)
// dtype = DataType::FLOAT32;
auto matrix =
NDArrayFactory::create(input->ordering(), {n, n}, DataTypeUtils::fromT<T>(), context); //, block.getWorkspace());
auto det = NDArrayFactory::create<T>(1, context);
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input});
dim3 launchDims(256, 256, 1024);
output->assign(1.f);
for (int e = 0; e < output->lengthOf(); e++) {
sd::LongType pos = e * n2;
// if (matrix.dataType() == input->dataType())
fillMatrix<T, T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n);
// else
// fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z,
// *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(),
// input->special(), pos, n);
lup_<T, int>(context, &matrix, nullptr, nullptr);
// else
// lup_<float>(context, &matrix, nullptr, nullptr);
auto offset = shape::getIndexOffset(e, output->shapeInfo());
auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer());
auto outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset;
// if (matrix.dataType() == input->dataType())
determinantKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(inputBuf, outputBuf, n);
// else
// determinantKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf,
// outputBuf, n);
}
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
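// determinant_ processes each trailing n x n slice of the input: fillMatrix copies the slice into a
// dense buffer, lup_ factorizes it in place (called here without a permutation vector), and
// determinantKernel multiplies the diagonal of the factorization into the corresponding output element.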
sd::Status determinant(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), return determinant_, (context, input, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {input});
}
template <typename T>
sd::Status logAbsDeterminant_(LaunchContext *context, NDArray *input, NDArray *output) {
sd::LongType n = input->sizeAt(-1);
sd::LongType n2 = n * n;
  std::vector<int> dims;
auto packX =
ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {input->rankOf() - 2, input->rankOf() - 1});
// auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {output->rankOf() - 1});
DataType dtype = input->dataType();
if (dtype != DataType::DOUBLE) dtype = DataType::FLOAT32;
auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, dtype, context); //, block.getWorkspace());
auto det = NDArrayFactory::create<T>(1, context);
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input});
dim3 launchDims(256, 256, 1024);
output->assign(0.f);
for (int e = 0; e < output->lengthOf(); e++) {
sd::LongType pos = e * n2;
// if (matrix.dataType() == input->dataType())
fillMatrix<T, T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(), input->specialShapeInfo(), pos, n);
// else
// fillMatrix<T, float><<<launchDims.x, launchDims.y, launchDims.z,
// *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(), input->specialBuffer(),
// input->special(), pos, n);
// if (matrix.dataType() == input->dataType())
lup_<T, int>(context, &matrix, nullptr, nullptr);
// else
// lup_<float>(context, &matrix, nullptr, nullptr);
auto offset = shape::getIndexOffset(e, output->shapeInfo());
auto inputBuf = reinterpret_cast<T *>(matrix.specialBuffer());
auto outputBuf = reinterpret_cast<T *>(output->specialBuffer()) + offset;
// if (matrix.dataType() == input->dataType())
determinantLogKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(inputBuf, outputBuf, n);
// else
// determinantLogKernel<T, float><<<launchDims.x, launchDims.y, launchDims.z, *stream >>> (inputBuf,
// outputBuf, n);
}
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status logAbsDeterminant(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), return logAbsDeterminant_, (context, input, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {input});
}
template <typename T>
static SD_KERNEL void fillLowerUpperKernel(void *lowerBuf, const sd::LongType *lowerShape, void *upperBuf,
const sd::LongType *upperShape, void *matrixBuf,
const sd::LongType *matrixShape, sd::LongType n) {
__shared__ T *lowerMatrix;
__shared__ T *upperMatrix;
__shared__ T *matrix;
if (threadIdx.x == 0) {
lowerMatrix = reinterpret_cast<T *>(lowerBuf);
upperMatrix = reinterpret_cast<T *>(upperBuf);
matrix = reinterpret_cast<T *>(matrixBuf);
}
__syncthreads();
  for (int k = blockIdx.x; k < n; k += gridDim.x) {  // split the factored matrix: entries at/below the main diagonal go to L, entries above it go to U
for (int j = threadIdx.x; j < n; j += blockDim.x) {
sd::LongType posX[] = {k, j};
sd::LongType posD[] = {j, j};
auto xPos = shape::getOffset(lowerShape, posX);
auto yPos = shape::getOffset(upperShape, posX);
auto iPos = shape::getOffset(matrixShape, posX);
auto dPos = shape::getOffset(matrixShape, posD);
if (k >= j)
lowerMatrix[xPos] = matrix[iPos]; //(k, j);
else
upperMatrix[yPos] = matrix[iPos]; // k, j);
}
}
}
template <typename T>
static sd::Status inverse_(sd::LaunchContext *context, NDArray *input, NDArray *output) {
auto n = input->sizeAt(-1);
auto n2 = n * n;
auto dtype = DataTypeUtils::fromT<T>(); // input->dataType();
// if (dtype != DataType::DOUBLE)
// dtype = DataType::FLOAT32;
NDArray matrix = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray upper = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray lower = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray compound = NDArrayFactory::create('c', {n, n}, dtype, context);
NDArray permutation = NDArrayFactory::create('c', {n, n}, dtype, context);
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(),
{input->rankOf() - 2, input->rankOf() - 1});
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(),
{output->rankOf() - 2, output->rankOf() - 1});
auto stream = context->getCudaStream();
for (auto i = 0LL; i < packX.numberOfTads(); i++) {
fillMatrix<T, T><<<1, n2, 1024, *stream>>>(matrix.specialBuffer(), matrix.specialShapeInfo(),
input->specialBuffer(), input->specialShapeInfo(), i * n2, n);
matrix.tickWriteDevice();
// compound.assign(matrix);
// if (matrix.dataType() == input->dataType())
lup_<T, int>(context, &matrix, nullptr, nullptr);
fillLowerUpperKernel<T><<<n, n, 1024, *stream>>>(lower.specialBuffer(), lower.specialShapeInfo(),
upper.specialBuffer(), upper.specialShapeInfo(),
matrix.specialBuffer(), matrix.specialShapeInfo(), n);
lower.tickWriteDevice();
upper.tickWriteDevice();
matrix.assign(0);
invertUpperMatrix(context, &upper, &matrix); // U^{-1}
matrix.tickWriteDevice();
compound.assign(0);
invertLowerMatrix(context, &lower, &compound); // L{-1}
compound.tickWriteDevice();
sd::MmulHelper::mmul(&matrix, &compound, &upper, 1.0, 0.0);
upper.tickWriteDevice();
returnMatrix<T><<<1, n2, 1024, *stream>>>(output->specialBuffer(), output->specialShapeInfo(),
upper.specialBuffer(), upper.specialShapeInfo(), i * n2, n);
}
return sd::Status::OK;
}
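// inverse_ builds each slice's inverse from its LU factors: after lup_, fillLowerUpperKernel splits the
// compound matrix into L and U, both triangular factors are inverted separately, and the product
// U^{-1} * L^{-1} = (L*U)^{-1} is written back into the output slice by returnMatrix.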
sd::Status inverse(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
BUILD_SINGLE_SELECTOR(input->dataType(), return inverse_, (context, input, output), SD_FLOAT_NATIVE);
NDArray::registerSpecialUse({output}, {input});
}
bool checkCholeskyInput(sd::LaunchContext *context, NDArray const *input) { return true; }
template <typename F>
SD_KERNEL void fillBatchKernel(F **dArrayBatch, F *buf, const sd::LongType *offsets, sd::LongType batchSize) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < batchSize; i += step) {
dArrayBatch[i] = buf + offsets[i];
}
}
template <typename F>
SD_KERNEL void adjustResultsKernel(F *dArray, const sd::LongType *shape, const sd::LongType *offsets,
sd::LongType batchSize, sd::LongType n) {
// auto i = blockIdx.x * blockDim.x + threadIdx.x;
sd::LongType *shapeOf = shape::shapeOf(shape);
sd::LongType *strideOf = shape::stride(shape);
for (auto i = blockIdx.x; i < batchSize; i += gridDim.x) {
auto current = dArray + offsets[i];
for (auto r = threadIdx.x; r < n; r += blockDim.x) {
for (auto c = r + 1; c < n; c++) {
sd::LongType posRC[] = {r, c};
auto pos = r * n + c; // shape::getOffset(0, shapeOf, strideOf, posRC, 2);
current[pos] = 0.;
}
}
}
}
template <typename F>
sd::Status cholesky__(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) {
if (!inplace) output->assign(input);
auto tempOutput = output->dup();
cusolverDnHandle_t handle = nullptr;
auto n = input->sizeAt(-1);
auto n2 = n * n;
NDArray::prepareSpecialUse({output}, {input});
auto status = cusolverDnCreate(&handle);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::cholesky_: Cannot create solver handle", status);
}
F **dArrayBatch = nullptr;
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(
tempOutput.shapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1});
const sd::LongType batchSize = packX.numberOfTads();
int *dInfoArray = nullptr;
auto err = cudaMalloc((void **)&dArrayBatch, sizeof(F *) * batchSize);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver batch data buffer", err);
}
err = cudaMalloc((void **)&dInfoArray, sizeof(int) * batchSize);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err);
}
auto stream = context->getCudaStream();
fillBatchKernel<F><<<1, batchSize, 128, *stream>>>(dArrayBatch, reinterpret_cast<F *>(tempOutput.specialBuffer()),
packX.specialOffsets(), batchSize);
status = cusolverDnSetStream(handle, *stream);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::cholesky_: Cannot set stream to solver handle", status);
}
const cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
if (input->dataType() == DataType::DOUBLE)
status = cusolverDnDpotrfBatched(handle, uplo, n, (double **)dArrayBatch, n, dInfoArray, batchSize);
else
status = cusolverDnSpotrfBatched(handle, uplo, n, (float **)dArrayBatch, n, dInfoArray, batchSize);
if (CUSOLVER_STATUS_SUCCESS != status) {
throw cuda_exception::build("helpers::cholesky_: Cholesky factorization failed for batch", status);
}
adjustResultsKernel<F><<<batchSize, n2, 128, *stream>>>(reinterpret_cast<F *>(tempOutput.specialBuffer()),
packX.specialShapeInfo(), packX.specialOffsets(), batchSize,
n);
err = cudaFree(dArrayBatch);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot deallocate memory for solver batch data buffer", err);
}
err = cudaFree(dInfoArray);
if (err) {
throw cuda_exception::build("helpers::cholesky_: Cannot allocate memory for solver errors buffer", err);
}
if (!inplace)
output->assign(tempOutput);
else
input->assign(tempOutput);
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
// template <typename T>
sd::Status cholesky_(LaunchContext *context, NDArray *input, NDArray *output, bool inplace) {
NDArray::prepareSpecialUse({output}, {input});
if (input->dataType() == DataType::DOUBLE)
cholesky__<double>(context, input, output, inplace);
else if (input->dataType() == DataType::FLOAT32)
cholesky__<float>(context, input, output, inplace);
else {
std::unique_ptr<NDArray> tempOutput(
NDArrayFactory::create_('c', input->getShapeAsVector(), DataType::FLOAT32, context));
tempOutput->assign(input);
cholesky__<float>(context, tempOutput.get(), tempOutput.get(), true);
output->assign(tempOutput.get());
}
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
sd::Status cholesky(sd::LaunchContext *context, NDArray *input, NDArray *output, bool inplace) {
// BUILD_SINGLE_SELECTOR(input->dataType(), return cholesky_, (context, input, output, inplace),
// SD_FLOAT_TYPES);
return cholesky_(context, input, output, inplace);
}
// BUILD_SINGLE_TEMPLATE(template sd::Status cholesky_, (LaunchContext* context, NDArray* input, NDArray* output,
// bool inplace), SD_FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template sd::Status inverse_, (sd::LaunchContext * context, NDArray *input, NDArray *output),
SD_FLOAT_NATIVE);
template <typename T>
SD_KERNEL void logDetKernel(const T *inputBuf, const sd::LongType *inputShape, sd::LongType batchNum,
const sd::LongType *tadShape, const sd::LongType *tadOffsets, T *outputBuf,
const sd::LongType *outputShape) {
__shared__ int n;
if (threadIdx.x == 0) {
n = shape::sizeAt(inputShape, -1); // * shape::sizeAt(inputShape, -1);
}
__syncthreads();
auto output = outputBuf;
auto input = inputBuf;
for (auto i = blockIdx.x; i < batchNum; i += gridDim.x) {
auto current = input + tadOffsets[i];
auto zIndex = shape::getIndexOffset(i, outputShape);
for (auto e = threadIdx.x; e < n; e += blockDim.x) {
sd::LongType diag[] = {e, e};
auto xIndex = shape::getOffset(tadShape, diag);
math::atomics::sd_atomicAdd(&output[zIndex], math::sd_log<T, T>(current[xIndex] * current[xIndex]));
}
}
}
template <typename T>
sd::Status logdetFunctor_(sd::LaunchContext *context, NDArray *input, NDArray *output) {
NDArray::prepareSpecialUse({output}, {input});
auto n2 = input->sizeAt(-1) * input->sizeAt(-2);
auto stream = context->getCudaStream();
NDArray tempOutput(*input);
cholesky(context, input, &tempOutput, false);
auto outputBuf = output->dataBuffer()
->specialAsT<T>(); // reinterpret_cast<T*>(output->specialBuffer()); // + e * n2; // + e * n2;
auto inputBuf = tempOutput.dataBuffer()->specialAsT<T>(); // reinterpret_cast<T*>(tempOutput.specialBuffer());
output->nullify();
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(
tempOutput.shapeInfo(), {tempOutput.rankOf() - 2, tempOutput.rankOf() - 1});
logDetKernel<T><<<128, 512, 256, *stream>>>(inputBuf, tempOutput.specialShapeInfo(), packX.numberOfTads(),
packX.specialShapeInfo(), packX.specialOffsets(), outputBuf,
output->specialShapeInfo());
output->tickWriteDevice();
NDArray::registerSpecialUse({output}, {input});
return sd::Status::OK;
}
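// logdetFunctor_ relies on the Cholesky factorization computed above (A = C * C^T):
// log(det(A)) = sum_i log(c_ii^2), which is exactly what logDetKernel accumulates
// from the diagonal of each factored slice.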
sd::Status logdetFunctor(sd::LaunchContext *context, NDArray *input, NDArray *output) {
BUILD_SINGLE_SELECTOR(output->dataType(), return logdetFunctor_, (context, input, output), SD_FLOAT_NATIVE);
}
/*
* lup - batched input, batched outputs
* */
sd::Status lup(LaunchContext *context, NDArray *input, NDArray *compound, NDArray *permutation) {
BUILD_DOUBLE_SELECTOR(input->dataType(), permutation->dataType(), lup_, (context, input, compound, permutation),
SD_FLOAT_NATIVE, SD_INDEXING_TYPES);
return sd::Status::OK;
}
// BUILD_SINGLE_TEMPLATE(template sd::Status logdetFunctor_,
// (sd::LaunchContext * context, NDArray * input, NDArray * output), SD_FLOAT_NATIVE);
} // namespace helpers
} // namespace ops
} // namespace sd
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
unsigned int prs;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
unordered_map<string, unordered_map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, unsigned long long int*> idx_vals;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
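/* Illustrative sketch (not part of the build): the epsilon-based float comparators above are
   meant to be used as binary predicates with thrust algorithms; a, b, res and n below are
   placeholders.

   thrust::device_vector<float_type> a(n), b(n);
   thrust::device_vector<bool> res(n);
   // res[i] = (a[i] < b[i]) with an EPSILON tolerance
   thrust::transform(a.begin(), a.end(), b.begin(), res.begin(), f_less());

   CudaSet::compare() further below applies the same functors against a constant via
   thrust::make_constant_iterator. */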
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) {
a = a_;
}
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
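/* Illustrative sketch (not part of the build): power_functor scales integers by 10^a, which is
   how fixed-point decimal columns are brought to a common scale before comparison; col, n, d
   and out below are placeholders.

   thrust::device_ptr<int_type> p(col);
   thrust::transform(thrust::make_transform_iterator(p, power_functor<int_type>(2)),
                     thrust::make_transform_iterator(p + n, power_functor<int_type>(2)),
                     thrust::make_constant_iterator(d), out, thrust::greater<int_type>());

   CudaSet::compare(int_type*, ...) further below uses this exact pattern. */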
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1970 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
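/* Illustrative sketch (not part of the build): converting a broken-down UTC timestamp to epoch
   seconds with the helper above.

   struct tm t = {};
   t.tm_year = 2015 - 1900;   // years since 1900
   t.tm_mon  = 0;             // January
   t.tm_mday = 1;
   time_t utc_secs = tm_to_time_t_utc(&t);   // 2015-01-01 00:00:00 UTC */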
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else
if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < columnNames.size(); i++)
allocColumnOnDevice(columnNames[i], RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
for(unsigned int i=0; i < columnNames.size(); i++) {
resizeDeviceColumn(RecCount, columnNames[i]);
};
};
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else
if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
//cout << "lower_val bits " << lower_val << " " << bits << endl;
if(type[colname] == 0) {
//cout << "lower_val bits " << lower_val << " " << bits << endl;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(&val_c_r[0], 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_c_r[0];
}
else
if(bits == 16) {
fread(&val_s_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_s_r[0];
}
if(bits == 32) {
fread(&val_i_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_i_r[0];
}
if(bits == 64) {
fread(&val_l_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
}
};
};
}
else
if(type[colname] == 1) {
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
fread(val_c_r, 4096, 1, f);
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
}
else {
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
};
};
}
else {
//no strings in fact tables
};
fclose(f);
return lower_val;
}
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(val_c_r, 4096, 1, f);
dest[i] = val_c_r[0];
}
else
if(bits == 16) {
fread(val_s_r, 4096, 1, f);
dest[i] = val_s_r[0];
}
if(bits == 32) {
fread(val_i_r, 4096, 1, f);
dest[i] = val_i_r[0];
}
if(bits == 64) {
fread(val_l_r, 4096, 1, f);
dest[i] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest[i] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest[i] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest[i] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest[i] = val_l_r[prm_vh[i]-idx];
}
};
};
fclose(f);
return lower_val;
}
std::clock_t tot_disk;
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
if(type[colname] == 2)
f1 = f1 + ".idx";
std::clock_t start1 = std::clock();
if(interactive) { //check if data are in buffers
if(buffers.find(f1) == buffers.end()) { // add data to buffers
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
process_error(3, "Error opening " + string(f1) +" file " );
};
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
//delete [] buffers[buffer_names.front()];
cudaFreeHost(buffers[buffer_names.front()]);
total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
buffer_sizes.erase(buffer_names.front());
buffers.erase(buffer_names.front());
buffer_names.pop();
};
fseek(f, 0, SEEK_SET);
char* buff;
cudaHostAlloc((void**) &buff, fileSize,cudaHostAllocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
buffers[f1] = buff;
buffer_sizes[f1] = fileSize;
buffer_names.push(f1);
total_buffer_size = total_buffer_size + fileSize;
cout << "added buffer " << f1 << " " << fileSize << endl;
};
// get data from buffers
if(type[colname] != 1) {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_int[colname].size()/8 + 10)
h_columns_int[colname].resize(cnt/8 + 10);
}
else {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_float[colname].size()/8 + 10)
h_columns_float[colname].resize(cnt/8 + 10);
}
}
else {
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
if(type[colname] != 1) {
if(1 > h_columns_int[colname].size())
h_columns_int[colname].resize(1);
fread(h_columns_int[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
if(cnt/8+10 > h_columns_int[colname].size()) {
h_columns_int[colname].resize(cnt + 10);
};
size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
else {
if(1 > h_columns_float[colname].size())
h_columns_float[colname].resize(1);
fread(h_columns_float[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
if(cnt/8+10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt + 10);
size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
fclose(f);
};
tot_disk = tot_disk + (std::clock() - start1);
};
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
if(type[colname] != 1) {
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + RecCount, h_columns_int[colname].begin() + offset);
}
else
thrust::copy(d_columns_float[colname].begin(), d_columns_float[colname].begin() + RecCount, h_columns_float[colname].begin() + offset);
}
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < columnNames.size(); i++) {
CopyColumnToHost(columnNames[i], offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
void CudaSet::GroupBy(stack<string> columnRef)
{
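// Mark group boundaries: for every grouping column, compare each element with its successor
// (not_equal_to on adjacent rows, width-aware when the column is bit-packed) and OR the flags
// into grp_dev; positions where any key changes start a new group and are collected into grp.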
thrust::device_vector<bool> grp_dev(mRecCount);
thrust::fill(grp_dev.begin(), grp_dev.end(), 0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else
if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else
if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
};
thrust::transform(d_group, d_group+mRecCount, grp_dev.begin(), grp_dev.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp_dev.begin(), grp_dev.end(), 1) + 1;
//cout << "grp count " << grp_count << endl;
grp.resize(grp_count);
if(grp_count > 1)
thrust::copy_if(thrust::make_counting_iterator((unsigned int)1), thrust::make_counting_iterator((unsigned int)grp_dev.size()),
grp_dev.begin(), grp.begin()+1, thrust::identity<bool>());
grp[0] = 0;
};
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
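// Build a sort permutation: start from the identity sequence and refine it with
// update_permutation for every key in the queue; callers (e.g. compress()) then
// thrust::gather the data columns through this permutation.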
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else
if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
cudaFree(temp);
}
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && char_hash[colname].size() == 0 && varencoding[colname] != 'N') {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(), ios::binary | ios::ate);
if(binary_file) {
auto sz = binary_file.tellg();
binary_file.seekg(0, binary_file.beg);
char* strings = new char[sz];
binary_file.read(strings, sz);
binary_file.close();
//unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < sz/char_size[colname]; z++) {
char_hash[colname][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
cudaFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << idx << endl;
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
using namespace mgpu;
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array holding pointers of malloced sized structures--
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
bf.resize(max_len);
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else
if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
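// Dictionary-encode a char column: with variable encoding enabled, each distinct string
// (keyed by its 64-bit MurmurHash) is appended once to the dictionary file and mapped to an
// integer index; otherwise every row's string is appended as-is. Per-row indices are
// pfor-compressed into the .idx file and the raw hashes are written to the .hash file.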
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
unordered_map<unsigned long long int, size_t>::iterator iter;
vector<int_type> test(mCount);
if(char_hash[colname].size() == 0 && varencoding[colname] == 'N')
char_hash[colname][0] = 0;
if(varencoding[colname] != 'N') {
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[colname].find(hash_array[i]);
if(iter == char_hash[colname].end()) {
cnt = char_hash[colname].size();
char_hash[colname][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
test[i] = cnt;
}
else {
test[i] = iter->second;
};
};
}
else {
auto cnt = char_hash[colname][0];
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
//cnt = char_hash[colname][0];
//char_hash[colname][0]++;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
test[i] = cnt;
cnt++;
};
char_hash[colname][0] = cnt;
};
memcpy(h_columns_int[colname].data(), test.data(), mCount*8);
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
bool first_time = 1;
size_t rec_sz = 0;
size_t process_piece;
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
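// Stream the text file in pieces sized to available GPU memory: copy each piece to the device,
// locate record boundaries ('\n') with thrust::copy_if, split fields with parse_functor, then
// convert every column with the type-specific functors (gpu_atoll / gpu_atold / gpu_atof and
// the date parsers) and mirror the parsed values back to the host vectors.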
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else
if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
cudaMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
cudaMemset(dest[i],0,max_len*rec_sz);
}
else {
cudaMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else
if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
cudaMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, cudaMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
cudaFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
void alloc_pool(unsigned int maxRecs) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
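// Scalar-vs-scalar comparison. op_type codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// The constant outcome is broadcast into a newly allocated boolean column of mRecCount entries.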
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if(d<s)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if(d>=s)
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if(d<=s)
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if(d==s)
res = 1;
else
res = 0;
else // !=
if(d!=s)
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if ((s-d) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON))
res = 1;
else
res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
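// Integer column against a constant: p1 re-scales the column and p2 re-scales the constant by powers of ten
// so that decimal columns are compared at the same scale.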
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if(p2)
d = d*(int_type)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
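// Integer column against integer column, with optional decimal re-scaling of either side via power_functor.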
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
if(!p1 && !p2) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
}
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else // !=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
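// Mixed int/float arithmetic: the integer column is converted to float in a pooled scratch buffer and then
// combined with column2; 'reverse' swaps the operand order for the non-commutative operators.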
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the integer column into the float scratch buffer
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
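// Integer column against an integer constant; p1/p2 align decimal scales for ADD, MINUS and division.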
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
unsigned int d1 = d;
if(p2)
d = d*(unsigned int)pow(10, p2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
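// Integer column against integer column with optional decimal re-scaling; MUL is applied without re-scaling.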
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else
if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
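// Integer column against a float constant: the column is converted to float before the operator is applied.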
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return (float_type*)thrust::raw_pointer_cast(temp);
}
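// Load one segment of an index from disk: rebuild the value dictionary, record the decoded row count and
// upload the packed values to the GPU. In interactive mode the raw file is cached in pinned host memory.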
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res;
//interactive = 0;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
cudaHostAlloc(&buff, fileSize, cudaHostAllocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
if(idx_vals.count(index_name) == 0) {
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, cudaMemcpyHostToDevice);
idx_vals[index_name] = (unsigned long long int*)d_str;
};
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, cudaMemcpyHostToDevice);
if(idx_vals.count(index_name))
cudaFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
mColumnCount = (unsigned int)nameRef.size();
tmp_table = 0;
filtered = 0;
mRecCount = 0;
hostRecCount = Recs;
segCount = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type>();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
filtered = 0;
};
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
CudaSet *a;
while(!op_sel.empty()) {
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else
if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else
if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 1;
else
if (op_type == 1) // <
return 2;
else
if (op_type == 6) // >=
return 5;
else
if (op_type == 5) // <=
return 6;
else
return op_type;
}
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
} ;
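// Ensure the columns in 'fields' have device-side storage. For filtered sets a single staging buffer sized
// to the source table is (re)allocated instead; otherwise each missing column is allocated individually.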
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
CudaSet* t = varNames[a->source_name];
if(int_size*t->maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
alloced_sz = int_size*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
a->allocColumnOnDevice(fields.front(), a->maxRecs);
}
fields.pop();
};
};
}
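// Bring one column of the source table 't' into 'a': gather through the filter permutation when prm_index
// is 'R', or copy the whole segment when every row passes.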
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
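// Undo the reduced-width copy: widen 8/16/32-bit values (per cpy_bits) back to 64 bits and add the
// per-column base value recorded in cpy_init_val; float columns are then converted back via long_to_float().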
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else
if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else
if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
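// Copy the requested columns of one segment to the GPU. For filtered sets the filter is applied first (flt)
// and the device columns can be grown to hold the accumulated rows (rsz).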
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
//std::clock_t start1 = std::clock();
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz && a->mRecCount) {
queue<string> fields1(fields);
while(!fields1.empty()) {
a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
fields1.pop();
};
a->devRecCount = a->devRecCount + a->mRecCount;
};
};
cpy_bits.clear();
cpy_init_val.clear();
auto f(fields);
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[a->source_name];
alloced_switch = 1;
t->CopyColumnToGpu(fields.front(), segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(fields.front(), segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
//std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
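// Gather the rows selected by the filter permutation (prm_d) from the staging buffer into the destination
// column, honouring the reduced element width recorded in cpy_bits.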
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
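// Copy a whole segment from the staging buffer into the destination column at the given offset, honouring
// the reduced element width recorded in cpy_bits.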
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
};
};
};
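// Load the listed columns of the right-hand table (plus the join column f2) for segments
// [start_segment, end_segment) and return the number of rows brought onto the GPU.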
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
if(a->type[a->columnNames[i]] == 2) {
if (a->char_size[a->columnNames[i]] > max_char1)
max_char1 = a->char_size[a->columnNames[i]];
}
else
if(a->type[a->columnNames[i]] == 0 && a->string_map.find(a->columnNames[i]) != a->string_map.end()) {
auto s = a->string_map[a->columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if (len > max_char1)
max_char1 = len;
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8;
while (!field_names.empty()) {
if (a->type[field_names.front()] == 2) {
if (a->char_size[field_names.front()] > max_char)
max_char = a->char_size[field_names.front()];
};
field_names.pop();
};
return max_char;
};
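// Split a table into segments when the listed columns would not fit comfortably in free GPU memory.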
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { // split when the columns would use more than a third of free GPU memory
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
cudaMemcpy( (void*)tmp, (void*) key, RecCount*len, cudaMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
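// Apply the filter of set 's' to one segment of table 'f'. The zone map is consulted first: 'R' means the
// predicate must be evaluated and the matching row indices are stored in prm_d, 'A' keeps the whole segment,
// any other code skips it.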
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
cudaFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
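// Append the records of table 's' to table 'f'. When both are on disk the per-segment files are copied and
// the string dictionaries are merged; when both are in memory the host vectors are resized and appended.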
void insert_records(const char* f, const char* s) {
char buf[4096];
size_t size, maxRecs, cnt = 0;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
process_error(3, "couldn't find " + string(s) );
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
process_error(3, "couldn't find " + string(f) );
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
cout << "SOURCES " << a->source << ":" << b->source << endl;
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
if(a->type[a->columnNames[z]] != 2) {
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
cout << str_s << " " << str_d << endl;
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // read at most sizeof(buf) bytes; BUFSIZ can exceed the 4096-byte buffer
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
}
else { //merge strings
//read b's strings
str_s = b->load_file_name + "." + b->columnNames[z];
FILE* dest = fopen(str_s.c_str(), "rb");
auto len = b->char_size[b->columnNames[z]];
map<string, unsigned long long int> map_d;
buf[len] = 0;
unsigned long long cnt = 0;
while (fread(buf, len, 1, dest)) {
map_d[buf] = cnt;
cnt++;
};
fclose(dest);
unsigned long long int cct = cnt;
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
FILE* source = fopen(str_s.c_str(), "rb");
dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // read at most sizeof(buf) bytes; BUFSIZ can exceed the 4096-byte buffer
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
str_s = a->load_file_name + "." + a->columnNames[z];
source = fopen(str_s.c_str(), "rb");
map<unsigned long long int, string> map_s;
buf[len] = 0;
cnt = 0;
while (fread(buf, len, 1, source)) {
map_s[cnt] = buf;
cnt++;
};
fclose(source);
queue<string> op_vx;
op_vx.push(a->columnNames[z]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->CopyColumnToGpu(a->columnNames[z], z, 0);
a->CopyColumnToHost(a->columnNames[z]);
str_d = b->load_file_name + "." + b->columnNames[z];
fstream f_file;
f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
for(auto j = 0; j < a->mRecCount; j++) {
auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
if(map_d.find(ss) == map_d.end()) { //add
f_file.write((char *)ss.c_str(), len);
a->h_columns_int[a->columnNames[z]][j] = cct;
cct++;
}
else {
a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
};
};
f_file.close();
thrust::device_vector<int_type> d_col(a->mRecCount);
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
};
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else
if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[a->columnNames[z]] == 0) {
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
}
else
if(b->type[a->columnNames[z]] == 1) {
thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
}
else {
cudaMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, cudaMemcpyHostToHost);
};
};
}
else
if(!a->source && b->source) {
total_segments = b->segCount;
total_count = b->mRecCount;
total_max = b->maxRecs;
queue<string> op_vx;
for(unsigned int i=0; i < a->columnNames.size(); i++)
op_vx.push(a->columnNames[i]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
for(unsigned int i = 0; i < a->segCount; i++) {
if (a->filtered) {
copyColumns(a, op_vx, i, cnt);
a->CopyToHost(0, a->mRecCount);
};
a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
};
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
};
};
};
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else
if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else
if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
cudaFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else
if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
// rewrite every column header with the new segment count and record totals
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
cudaFree(d);
};
};
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
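// On-disk layout written above (and read back by load_col_data below):
//   [8] number of tables
//   per table:   [8] name length, [name bytes]
//                [8] number of columns
//                per column: [8] name length, [name bytes]
//                            [4] col_type, [4] col_length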
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
char buffer[4000];
unsigned int col_type, col_length;
fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs; i++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str1.assign(buffer, str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1; j++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str2.assign(buffer, str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
//cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
bool var_exists(CudaSet* a, string name) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end())
return 1;
else
return 0;
}
int file_exist (const char *filename)
{
std::ifstream infile(filename);
return infile.good();
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 0;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
return bitmaps_exist;
}
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_precision.empty() ) {
left->fil_nums_precision.push(right->fil_nums_precision.front());
right->fil_nums_precision.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
cudaMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, cudaMemcpyDeviceToHost);
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
void *d;
cudaMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
cudaMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, cudaMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
cudaFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
void compress_int(const string file_name, const thrust::host_vector<int_type>& res)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_s.insert(f);
};
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
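// File layout produced above (derived from the writes; int_size is assumed to
// be 8 bytes here, matching the unsigned long long packed words):
//   [4]  sz            - number of distinct values
//   [sz * int_size]    - sorted dictionary of the distinct values
//   [4]  fit_count     - codes packed into each word (64 / bits_encoded)
//   [4]  bits_encoded  - ceil(log2(sz + 1)) bits per code
//   [4]  vals_count    - number of packed words that follow
//   [4]  real_count    - number of original values
//   [vals_count * int_size] - packed words, first code in the most significant bits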
int_type* get_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors, bool& free_mem) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_int_by_name(s1_val);
free_mem = 0;
}
else {
t = exe_vectors.top();
exe_vectors.pop();
free_mem = 1;
}
return t;
};
int_type* get_host_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_host_int_by_name(s1_val);
}
else {
t = exe_vectors.top();
thrust::device_ptr<int_type> st1((int_type*)t);
for(int z = 0; z < 10; z++)
cout << "RESVEC " << st1[z] << endl;
exe_vectors.pop();
}
return t;
};
unsigned int get_decimals(CudaSet* a, string s1_val, stack<unsigned int>& exe_precision) {
unsigned int t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->decimal_zeroes[s1_val];
else {
t = exe_precision.top();
exe_precision.pop();
}
return t;
};
#ifdef _WIN64
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#else
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
//==============================================================================
// (next source file)
//==============================================================================
#include "DFImprovedSolver.h"
#include "solvers/kernels/generateQT.h"
/**
* \brief Constructor -- get simulation parameters and grid.
*/
template <typename memoryType>
DFImprovedSolver<memoryType>::DFImprovedSolver(parameterDB *pDB, domain *dInfo)
{
NavierStokesSolver<memoryType>::paramDB = pDB;
NavierStokesSolver<memoryType>::domInfo = dInfo;
} // DFImprovedSolver
/**
* \brief Compute the modified divergence operator.
*/
template <typename memoryType>
void DFImprovedSolver<memoryType>::generateQT()
{
int nx = NavierStokesSolver<memoryType>::domInfo->nx,
ny = NavierStokesSolver<memoryType>::domInfo->ny;
const int N_u = (nx-1)*ny;
NavierStokesSolver<memoryType>::generateQT();
DirectForcingSolver<memoryType>::updateQ();
cusp::coo_matrix<int, real, host_memory> QTHost(nx*ny, (nx-1)*ny+nx*(ny-1), 4*nx*ny-2*(nx+ny));
cusp::blas::fill(QTHost.row_indices, -1);
cusp::blas::fill(QTHost.column_indices, -1);
cusp::blas::fill(QTHost.values, 0.0);
int idx = 0;
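// Each pressure cell (row = j*nx + i) picks up +/-1 contributions from the four
// surrounding velocity fluxes. When a flux is tagged by the direct forcing
// (tags[I] != -1) its contribution is redirected to column tags[I] with weight
// coeffs[I]; the short backward scan over the last few entries merges duplicates
// that land on the same (row, column) slot.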
for(int j=0; j<ny; j++)
{
for(int i=0; i<nx; i++)
{
int row = j*nx+i;
if(i>0)
{
int I = j*(nx-1)+(i-1);
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
QTHost.row_indices[idx] = row;
QTHost.column_indices[idx] = I;
QTHost.values[idx] = 1.0;
idx++;
}
else
{
bool flag = false;
int start;
start = (idx>4)? idx-4 : 0;
for(int l=start; l<idx && !flag; l++)
{
if(QTHost.row_indices[l]==row && QTHost.column_indices[l]==DirectForcingSolver<memoryType>::tags[I])
{
flag = true;
QTHost.values[l] += DirectForcingSolver<memoryType>::coeffs[I];
}
}
if(!flag)
{
QTHost.row_indices[idx] = row;
QTHost.column_indices[idx] = DirectForcingSolver<memoryType>::tags[I];
QTHost.values[idx] = DirectForcingSolver<memoryType>::coeffs[I];
idx++;
}
}
}
if(i<nx-1)
{
int I = j*(nx-1)+i;
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
QTHost.row_indices[idx] = row;
QTHost.column_indices[idx] = I;
QTHost.values[idx] = -1.0;
idx++;
}
else
{
bool flag = false;
int start;
start = (idx>4)? idx-4 : 0;
for(int l=start; l<idx && !flag; l++)
{
if(QTHost.row_indices[l]==row && QTHost.column_indices[l]==DirectForcingSolver<memoryType>::tags[I])
{
flag = true;
QTHost.values[l] -= DirectForcingSolver<memoryType>::coeffs[I];
}
}
if(!flag)
{
QTHost.row_indices[idx] = row;
QTHost.column_indices[idx] = DirectForcingSolver<memoryType>::tags[I];
QTHost.values[idx] = -DirectForcingSolver<memoryType>::coeffs[I];
idx++;
}
}
}
if(j>0)
{
int I = (j-1)*nx+i+N_u;
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
QTHost.row_indices[idx] = row;
QTHost.column_indices[idx] = I;
QTHost.values[idx] = 1.0;
idx++;
}
else
{
bool flag = false;
int start;
start = (idx>4)? idx-4 : 0;
for(int l=start; l<idx && !flag; l++)
{
if(QTHost.row_indices[l]==row && QTHost.column_indices[l]==DirectForcingSolver<memoryType>::tags[I])
{
flag = true;
QTHost.values[l] += DirectForcingSolver<memoryType>::coeffs[I];
}
}
if(!flag)
{
QTHost.row_indices[idx] = row;
QTHost.column_indices[idx] = DirectForcingSolver<memoryType>::tags[I];
QTHost.values[idx] = DirectForcingSolver<memoryType>::coeffs[I];
idx++;
}
}
}
if(j<ny-1)
{
int I = j*nx+i+N_u;
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
QTHost.row_indices[idx] = row;
QTHost.column_indices[idx] = I;
QTHost.values[idx] = -1.0;
idx++;
}
else
{
bool flag = false;
int start;
start = (idx>4)? idx-4 : 0;
for(int l=start; l<idx && !flag; l++)
{
if(QTHost.row_indices[l]==row && QTHost.column_indices[l]==DirectForcingSolver<memoryType>::tags[I])
{
flag = true;
QTHost.values[l] -= DirectForcingSolver<memoryType>::coeffs[I];
}
}
if(!flag)
{
QTHost.row_indices[idx] = row;
QTHost.column_indices[idx] = DirectForcingSolver<memoryType>::tags[I];
QTHost.values[idx] = -DirectForcingSolver<memoryType>::coeffs[I];
idx++;
}
}
}
}
}
NavierStokesSolver<memoryType>::QT = QTHost;
NavierStokesSolver<memoryType>::QT.resize(nx*ny, (nx-1)*ny+nx*(ny-1), idx);
/*std::cout << "\nQT stuff:\n";
std::cout << "Copied and resized matrix." << std::endl;
std::cout << "Original size: " << QTHost.values.size() << std::endl;
std::cout << "Actual size : " << idx << std::endl;
cusp::io::write_matrix_market_file(NavierStokesSolver<memoryType>::Q, "Q.mtx");
std::cout << "Wrote Q to file." << std::endl;
cusp::io::write_matrix_market_file(NavierStokesSolver<memoryType>::QT, "QT.mtx");
std::cout << "Wrote QT to file." << std::endl;*/
} // generateQT
/*
template <typename memoryType>
void DFImprovedSolver<memoryType>::generateC()
{
int nx = NavierStokesSolver<memoryType>::domInfo->nx,
ny = NavierStokesSolver<memoryType>::domInfo->ny;
parameterDB &db = *NavierStokesSolver<memoryType>::paramDB;
real dt = db["simulation"]["dt"].get<real>();
const int ii = 2, jj = 2;
const int N_u = (nx-1)*ny;
int isColumnNonZero[5][5];
for(int m=-2; m<=2; m++)
{
for(int l=-2; l<=2; l++)
{
isColumnNonZero[jj+m][ii+l]=0;
}
}
int num_nonzeros = 0;
for(int j=0; j<ny; j++)
{
for(int i=0; i<nx; i++)
{
if(j>0)
{
int I = (j-1)*nx+i+N_u;
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
isColumnNonZero[jj-1][ii] += 1;
isColumnNonZero[jj][ii] += 1;
}
else
{
int diff = DirectForcingSolver<memoryType>::tags[I]-I;
if(diff < -1)
{
isColumnNonZero[jj-2][ii] += 1;
isColumnNonZero[jj-1][ii] += 1;
}
else if(diff == -1)
{
isColumnNonZero[jj-1][ii-1] += 1;
isColumnNonZero[jj][ii-1] += 1;
}
else if(diff == 1)
{
isColumnNonZero[jj-1][ii+1] += 1;
isColumnNonZero[jj][ii+1] += 1;
}
else if(diff > 1)
{
isColumnNonZero[jj][ii] += 1;
isColumnNonZero[jj+1][ii] += 1;
}
}
}
if(i>0)
{
int I = j*(nx-1)+(i-1);
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
isColumnNonZero[jj][ii-1] += 1;
isColumnNonZero[jj][ii] += 1;
}
else
{
int diff = DirectForcingSolver<memoryType>::tags[I]-I;
if(diff < -1)
{
isColumnNonZero[jj-1][ii-1] += 1;
isColumnNonZero[jj-1][ii] += 1;
}
else if(diff == -1)
{
isColumnNonZero[jj][ii-2] += 1;
isColumnNonZero[jj][ii-1] += 1;
}
else if(diff == 1)
{
isColumnNonZero[jj][ii] += 1;
isColumnNonZero[jj][ii+1] += 1;
}
else if(diff > 1)
{
isColumnNonZero[jj+1][ii-1] += 1;
isColumnNonZero[jj+1][ii] += 1;
}
}
}
if(i<nx-1)
{
int I = j*(nx-1)+i;
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
isColumnNonZero[jj][ii+1] += 1;
isColumnNonZero[jj][ii] += 1;
}
else
{
int diff = DirectForcingSolver<memoryType>::tags[I]-I;
if(diff < -1)
{
isColumnNonZero[jj-1][ii+1] += 1;
isColumnNonZero[jj-1][ii] += 1;
}
else if(diff == -1)
{
isColumnNonZero[jj][ii] += 1;
isColumnNonZero[jj][ii-1] += 1;
}
else if(diff == 1)
{
isColumnNonZero[jj][ii+2] += 1;
isColumnNonZero[jj][ii+1] += 1;
}
else if(diff > 1)
{
isColumnNonZero[jj+1][ii+1] += 1;
isColumnNonZero[jj+1][ii] += 1;
}
}
}
if(j<ny-1)
{
int I = j*nx+i+N_u;
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
isColumnNonZero[jj+1][ii] += 1;
isColumnNonZero[jj][ii] += 1;
}
else
{
int diff = DirectForcingSolver<memoryType>::tags[I]-I;
if(diff < -1)
{
isColumnNonZero[jj][ii] += 1;
isColumnNonZero[jj-1][ii] += 1;
}
else if(diff == -1)
{
isColumnNonZero[jj+1][ii-1] += 1;
isColumnNonZero[jj][ii-1] += 1;
}
else if(diff == 1)
{
isColumnNonZero[jj+1][ii+1] += 1;
isColumnNonZero[jj][ii+1] += 1;
}
else if(diff > 1)
{
isColumnNonZero[jj+2][ii] += 1;
isColumnNonZero[jj+1][ii] += 1;
}
}
}
int numNonZeroColumns = 0;
//std::cout << "(" << i << "," << j << ")|";
for(int m=-2; m<=2; m++)
{
for(int l=-2; l<=2; l++)
{
//std::cout << isColumnNonZero[jj+m][ii+l] << ",";
if(isColumnNonZero[jj+m][ii+l]) numNonZeroColumns++;
isColumnNonZero[jj+m][ii+l] = 0;
}
//std::cout << "|";
}
//std::cout << numNonZeroColumns << std::endl;
num_nonzeros += numNonZeroColumns;
}
}
//std::cout << "Total nonzeros: " << num_nonzeros << std::endl;
cusp::coo_matrix<int, real, host_memory> CHost(nx*ny, nx*ny, num_nonzeros);
real valuesInColumns[5][5];
for(int m=-2; m<=2; m++)
{
for(int l=-2; l<=2; l++)
{
valuesInColumns[jj+m][ii+l]=0.0;
}
}
int idx = 0;
for(int j=0; j<ny; j++)
{
for(int i=0; i<nx; i++)
{
if(j>0)
{
int I = (j-1)*nx+i+N_u;
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
isColumnNonZero[jj-1][ii] += 1;
valuesInColumns[jj-1][ii] -= 1.0;
isColumnNonZero[jj][ii] += 1;
valuesInColumns[jj][ii] += 1.0;
}
else
{
int diff = DirectForcingSolver<memoryType>::tags[I]-I;
real xi = DirectForcingSolver<memoryType>::coeffs[I];
if(diff < -1)
{
isColumnNonZero[jj-2][ii] += 1;
valuesInColumns[jj-2][ii] -= xi;
isColumnNonZero[jj-1][ii] += 1;
valuesInColumns[jj-1][ii] += xi;
}
else if(diff == -1)
{
isColumnNonZero[jj-1][ii-1] += 1;
valuesInColumns[jj-1][ii-1] -= xi;
isColumnNonZero[jj][ii-1] += 1;
valuesInColumns[jj][ii-1] += xi;
}
else if(diff == 1)
{
isColumnNonZero[jj-1][ii+1] += 1;
valuesInColumns[jj-1][ii+1] -= xi;
isColumnNonZero[jj][ii+1] += 1;
valuesInColumns[jj][ii+1] += xi;
}
else if(diff > 1)
{
isColumnNonZero[jj][ii] += 1;
valuesInColumns[jj][ii] -= xi;
isColumnNonZero[jj+1][ii] += 1;
valuesInColumns[jj+1][ii] += xi;
}
}
}
if(i>0)
{
int I = j*(nx-1)+(i-1);
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
isColumnNonZero[jj][ii-1] += 1;
valuesInColumns[jj][ii-1] -= 1.0;
isColumnNonZero[jj][ii] += 1;
valuesInColumns[jj][ii] += 1.0;
}
else
{
int diff = DirectForcingSolver<memoryType>::tags[I]-I;
real xi = DirectForcingSolver<memoryType>::coeffs[I];
if(diff < -1)
{
isColumnNonZero[jj-1][ii-1] += 1;
valuesInColumns[jj-1][ii-1] -= xi;
isColumnNonZero[jj-1][ii] += 1;
valuesInColumns[jj-1][ii] += xi;
}
else if(diff == -1)
{
isColumnNonZero[jj][ii-2] += 1;
valuesInColumns[jj][ii-2] -= xi;
isColumnNonZero[jj][ii-1] += 1;
valuesInColumns[jj][ii-1] += xi;
}
else if(diff == 1)
{
isColumnNonZero[jj][ii] += 1;
valuesInColumns[jj][ii] -= xi;
isColumnNonZero[jj][ii+1] += 1;
valuesInColumns[jj][ii+1] += xi;
}
else if(diff > 1)
{
isColumnNonZero[jj+1][ii-1] += 1;
valuesInColumns[jj+1][ii-1] -= xi;
isColumnNonZero[jj+1][ii] += 1;
valuesInColumns[jj+1][ii] += xi;
}
}
}
if(i<nx-1)
{
int I = j*(nx-1)+i;
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
isColumnNonZero[jj][ii+1] += 1;
valuesInColumns[jj][ii+1] -= 1.0;
isColumnNonZero[jj][ii] += 1;
valuesInColumns[jj][ii] += 1.0;
}
else
{
int diff = DirectForcingSolver<memoryType>::tags[I]-I;
real xi = DirectForcingSolver<memoryType>::coeffs[I];
if(diff < -1)
{
isColumnNonZero[jj-1][ii+1] += 1;
valuesInColumns[jj-1][ii+1] -= xi;
isColumnNonZero[jj-1][ii] += 1;
valuesInColumns[jj-1][ii] += xi;
}
else if(diff == -1)
{
isColumnNonZero[jj][ii] += 1;
valuesInColumns[jj][ii] -= xi;
isColumnNonZero[jj][ii-1] += 1;
valuesInColumns[jj][ii-1] += xi;
}
else if(diff == 1)
{
isColumnNonZero[jj][ii+2] += 1;
valuesInColumns[jj][ii+2] -= xi;
isColumnNonZero[jj][ii+1] += 1;
valuesInColumns[jj][ii+1] += xi;
}
else if(diff > 1)
{
isColumnNonZero[jj+1][ii+1] += 1;
valuesInColumns[jj+1][ii+1] -= xi;
isColumnNonZero[jj+1][ii] += 1;
valuesInColumns[jj+1][ii] += xi;
}
}
}
if(j<ny-1)
{
int I = j*nx+i+N_u;
if(DirectForcingSolver<memoryType>::tags[I]==-1)
{
isColumnNonZero[jj+1][ii] += 1;
valuesInColumns[jj+1][ii] -= 1.0;
isColumnNonZero[jj][ii] += 1;
valuesInColumns[jj][ii] += 1.0;
}
else
{
int diff = DirectForcingSolver<memoryType>::tags[I]-I;
real xi = DirectForcingSolver<memoryType>::coeffs[I];
if(diff < -1)
{
isColumnNonZero[jj][ii] += 1;
valuesInColumns[jj][ii] -= xi;
isColumnNonZero[jj-1][ii] += 1;
valuesInColumns[jj-1][ii] += xi;
}
else if(diff == -1)
{
isColumnNonZero[jj+1][ii-1] += 1;
valuesInColumns[jj+1][ii-1] -= xi;
isColumnNonZero[jj][ii-1] += 1;
valuesInColumns[jj][ii-1] += xi;
}
else if(diff == 1)
{
isColumnNonZero[jj+1][ii+1] += 1;
valuesInColumns[jj+1][ii+1] -= xi;
isColumnNonZero[jj][ii+1] += 1;
valuesInColumns[jj][ii+1] += xi;
}
else if(diff > 1)
{
isColumnNonZero[jj+2][ii] += 1;
valuesInColumns[jj+2][ii] -= xi;
isColumnNonZero[jj+1][ii] += 1;
valuesInColumns[jj+1][ii] += xi;
}
}
}
int row = j*nx+i;
for(int m=-2; m<=2; m++)
{
for(int l=-2; l<=2; l++)
{
if(isColumnNonZero[jj+m][ii+l])
{
CHost.row_indices[idx] = row;
CHost.column_indices[idx] = row + m*nx + l;
CHost.values[idx] = valuesInColumns[jj+m][ii+l];
if(CHost.row_indices[idx]==(ny/2)*nx+nx/2 && CHost.row_indices[idx]==CHost.column_indices[idx])
{
CHost.values[idx]+=CHost.values[idx];
}
idx++;
}
isColumnNonZero[jj+m][ii+l] = 0;
valuesInColumns[jj+m][ii+l] = 0.0;
}
}
}
}
CHost.sort_by_row_and_column();
CHost.values[0] += CHost.values[0];
NavierStokesSolver<memoryType>::C = CHost;
//cusp::io::write_matrix_market_file(NavierStokesSolver<memoryType>::C, "C-generateC.mtx");
cusp::blas::scal(NavierStokesSolver<memoryType>::C.values, dt);
} // generateC
*/
// specialization of the class
template class DFImprovedSolver<device_memory>;
//==============================================================================
// (next source file)
//==============================================================================
namespace mshadow {
namespace cuda {
namespace {
// workspace variables
enum LSoftmaxTempSpaceType {kCost, kCosmt, kK, kSin2t, kFo, kCostM};
}
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
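// Grid-stride loop: each thread starts at its global index and advances by the
// total number of launched threads until the whole range [0, n) is covered.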
MSHADOW_XINLINE int LSPowOfMO(const int k) {
return 1 - ((k&0x01) << 1);
}
template<typename DType>
__global__ void LSCalcNorm(const Tensor<gpu, 2, DType> x,
Tensor<gpu, 1, DType> x_norm) {
const int n = x.size(0);
const int m = x.size(1);
CUDA_KERNEL_LOOP(i, n) {
DType norm = 0;
for (int j = 0; j < m; ++j) {
norm += x[i][j] * x[i][j];
}
x_norm[i] = sqrt(norm);
}
}
template<typename DType>
__device__ int LSFindK(const DType *k_table, const int n, const DType cos_t) {
const DType eps = 1e-5;
for (int i = 0; i < n; ++i) {
if (((k_table[i+1] < cos_t) || (abs(k_table[i+1] - cos_t) < eps)) &&
((k_table[i] > cos_t) || (abs(k_table[i] - cos_t) < eps))) {
return i;
}
}
return 0;
}
template<typename DType>
__device__ DType LSCalcCosmt(const DType *c_table, const int n,
const DType cos_t, const int margin) {
const DType sin2_t = 1 - cos_t * cos_t;
DType cos_t_p = pow(cos_t, margin);
DType sin2_t_p = 1;
DType cos_mt = cos_t_p; // p = 0
for (int p = 1; p <= margin / 2; ++p) {
cos_t_p /= cos_t * cos_t; // don't replace `cos_t*cos_t` with `1-sin2_t`, this can cause numeric issue if cos_t --> 0
sin2_t_p *= sin2_t;
cos_mt += LSPowOfMO(p) * c_table[2*p] * cos_t_p * sin2_t_p;
}
return cos_mt;
}
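// The loop above evaluates the multiple-angle identity
//   cos(m*t) = sum_{p=0}^{floor(m/2)} (-1)^p * C(m, 2p) * cos^(m-2p)(t) * sin^(2p)(t),
// where c_table presumably holds the binomial coefficients C(m, k).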
template<typename DType>
__global__ void LSoftmaxForwardKernel(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
Tensor<gpu, 2, DType> out,
const Tensor<gpu, 1, DType> k_table,
const Tensor<gpu, 1, DType> c_table,
const int margin,
const DType beta) {
const int n = x.size(0);
const int feature_dim = x.size(1);
const int m = w.size(0);
CUDA_KERNEL_LOOP(i, n) {
const int yi = static_cast<int>(label[i]);
const DType fo_i_yi = out[i][yi];
const DType cos_t = fo_i_yi / (x_norm[i] * w_norm[yi]);
const int k = LSFindK(k_table.dptr_, k_table.size(0), cos_t);
const DType cos_mt = LSCalcCosmt(c_table.dptr_, c_table.size(0), cos_t, margin);
const DType f_i_yi = (LSPowOfMO(k) * cos_mt - 2*k) * (w_norm[yi] * x_norm[i]);
out[i][yi] = (f_i_yi + beta * fo_i_yi) / (1 + beta);
}
}
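// For the target class, the kernel above replaces the plain logit fo = w_yi . x_i with
//   f = ||w_yi|| * ||x_i|| * ( (-1)^k * cos(m*theta) - 2k ),
// then writes the blend (f + beta*fo) / (1 + beta), so beta interpolates between
// the original softmax logit and the large-margin one.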
template<typename DType>
inline void LSoftmaxForward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &out,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
const int n = x.size(0);
const int m = w.size(0);
dim3 dimBlock(kBaseThreadNum);
dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum);
LSCalcNorm<<<dimGrid, dimBlock>>>(x, x_norm);
dimGrid.x = ((m + kBaseThreadNum - 1) / kBaseThreadNum);
LSCalcNorm<<<dimGrid, dimBlock>>>(w, w_norm);
dimGrid.x = ((n + kBaseThreadNum - 1) / kBaseThreadNum);
LSoftmaxForwardKernel<<<dimGrid, dimBlock>>>(x, w, label, x_norm, w_norm, out, k_table, c_table, margin, beta);
}
template<typename DType>
__global__ void LSoftmaxBackwardRequired(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
Tensor<gpu, 2, DType> workspace,
const Tensor<gpu, 1, DType> k_table,
const Tensor<gpu, 1, DType> c_table,
const int margin) {
const int n = x.size(0);
const int feature_dim = x.size(1);
CUDA_KERNEL_LOOP(i, n) {
const int yi = static_cast<int>(label[i]);
// fo_i_yi = dot(w_yi, x_i)
DType fo_i_yi = 0;
for (int p = 0; p < feature_dim; ++p) {
fo_i_yi += w[yi][p] * x[i][p];
}
const DType cos_t = fo_i_yi / (x_norm[i] * w_norm[yi]);
const int k = LSFindK(k_table.dptr_, k_table.size(0), cos_t);
const DType cos_mt = LSCalcCosmt(c_table.dptr_, c_table.size(0), cos_t, margin);
const DType sin2_t = 1 - cos_t * cos_t;
workspace[kCost][i] = cos_t;
workspace[kCosmt][i] = cos_mt;
workspace[kK][i] = static_cast<DType>(k);
workspace[kSin2t][i] = sin2_t;
workspace[kFo][i] = fo_i_yi;
workspace[kCostM][i] = pow(cos_t, margin - 1);
}
}
template<typename DType>
__global__ void LSoftmaxBackwardXKernel(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
const Tensor<gpu, 2, DType> o_grad,
Tensor<gpu, 2, DType> x_grad,
const Tensor<gpu, 2, DType> workspace,
const Tensor<gpu, 1, DType> c_table,
const int margin,
const DType beta) {
const int nthreads = x.size(0) * x.size(1);
const int feature_dim = x.size(1);
CUDA_KERNEL_LOOP(idx, nthreads) {
const int i = idx / feature_dim;
const int l = idx % feature_dim;
const int yi = static_cast<int>(label[i]);
const DType cos_t = workspace[kCost][i];
const DType cos_mt = workspace[kCosmt][i];
const int k = static_cast<int>(workspace[kK][i]);
const DType sin2_t = workspace[kSin2t][i];
const DType fo_i_yi = workspace[kFo][i];
const DType w_norm_yi = w_norm[yi];
const DType x_norm_i = x_norm[i];
const DType dcos_dx = w[yi][l] / (w_norm_yi * x_norm_i) - \
fo_i_yi * x[i][l] / (w_norm_yi * x_norm_i * x_norm_i * x_norm_i);
const DType dsin2_dx = -2 * cos_t * dcos_dx;
DType cos_t_p = workspace[kCostM][i];
DType sin2_t_p = 1;
DType dcosm_dx = margin * cos_t_p * dcos_dx; // p = 0
for (int p = 1; p <= margin / 2; ++p) {
cos_t_p /= cos_t * cos_t;
dcosm_dx += LSPowOfMO(p) * c_table[2*p] * (p * cos_t * dsin2_dx + \
(margin - 2*p) * sin2_t * dcos_dx) * cos_t_p * sin2_t_p;
sin2_t_p *= sin2_t;
}
const DType df_dx = (LSPowOfMO(k) * cos_mt - 2*k) * w_norm_yi / x_norm_i * x[i][l] + \
LSPowOfMO(k) * w_norm_yi * x_norm_i * dcosm_dx;
const DType alpha = 1 / (1 + beta);
x_grad[i][l] += alpha * o_grad[i][yi] * (df_dx - w[yi][l]);
}
}
template<typename DType>
__global__ void LSoftmaxBackwardWKernel(const Tensor<gpu, 2, DType> x,
const Tensor<gpu, 2, DType> w,
const Tensor<gpu, 1, DType> label,
const Tensor<gpu, 1, DType> x_norm,
const Tensor<gpu, 1, DType> w_norm,
const Tensor<gpu, 2, DType> o_grad,
Tensor<gpu, 2, DType> w_grad,
const Tensor<gpu, 2, DType> workspace,
const Tensor<gpu, 1, DType> c_table,
const int margin,
const DType beta) {
const int nthreads = w.size(0) * w.size(1);
const int n = x.size(0);
const int feature_dim = w.size(1);
CUDA_KERNEL_LOOP(idx, nthreads) {
const int j = idx / feature_dim;
const int l = idx % feature_dim;
DType dw = 0;
for (int i = 0; i < n; ++i) {
const int yi = static_cast<int>(label[i]);
if (yi == j) {
const DType cos_t = workspace[kCost][i];
const DType cos_mt = workspace[kCosmt][i];
const int k = static_cast<int>(workspace[kK][i]);
const DType sin2_t = workspace[kSin2t][i];
const DType fo_i_yi = workspace[kFo][i];
const DType x_norm_i = x_norm[i];
const DType w_norm_yi = w_norm[yi];
const DType dcos_dw = x[i][l] / (w_norm_yi * x_norm_i) - \
fo_i_yi * w[yi][l] / (x_norm_i * w_norm_yi * w_norm_yi * w_norm_yi);
const DType dsin2_dw = -2 * cos_t * dcos_dw;
DType cos_t_p = workspace[kCostM][i];
DType sin2_t_p = 1;
DType dcosm_dw = margin * cos_t_p * dcos_dw; // p = 0
for (int p = 1; p <= margin / 2; ++p) {
cos_t_p /= cos_t * cos_t;
dcosm_dw += LSPowOfMO(p) * c_table[2*p] * (p * cos_t * dsin2_dw + \
(margin - 2*p) * sin2_t * dcos_dw) * cos_t_p * sin2_t_p;
sin2_t_p *= sin2_t;
}
const DType df_dw_j = (LSPowOfMO(k) * cos_mt - 2*k) * x_norm_i / w_norm_yi * w[yi][l] + \
LSPowOfMO(k) * w_norm_yi * x_norm_i * dcosm_dw;
dw += o_grad[i][yi] * (df_dw_j - x[i][l]);
}
}
const DType alpha = 1 / (1 + beta);
w_grad[j][l] += alpha * dw;
}
}
template<typename DType>
inline void LSoftmaxBackward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 2, DType> &o_grad,
const Tensor<gpu, 2, DType> &x_grad,
const Tensor<gpu, 2, DType> &w_grad,
const Tensor<gpu, 2, DType> &workspace,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
const int n = x.size(0);
const int feature_dim = x.size(1);
const int m = w.size(0);
dim3 dimBlock(kBaseThreadNum);
dim3 dimGrid((n + kBaseThreadNum - 1) / kBaseThreadNum);
LSoftmaxBackwardRequired<<<dimGrid, dimBlock>>>(x, w, label, x_norm, w_norm, workspace,
k_table, c_table, margin);
dimGrid.x = ((n * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum);
LSoftmaxBackwardXKernel<<<dimGrid, dimBlock>>>(x, w, label, x_norm, w_norm, o_grad, x_grad, workspace,
c_table, margin, beta);
dimGrid.x = ((m * feature_dim + kBaseThreadNum - 1) / kBaseThreadNum);
LSoftmaxBackwardWKernel<<<dimGrid, dimBlock>>>(x, w, label, x_norm, w_norm, o_grad, w_grad, workspace,
c_table, margin, beta);
}
} // namespace cuda
template<typename DType>
inline void LSoftmaxForward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 2, DType> &out,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
cuda::LSoftmaxForward(x, w, label, out, x_norm, w_norm,
k_table, c_table, margin, beta);
}
template<typename DType>
inline void LSoftmaxBackward(const Tensor<gpu, 2, DType> &x,
const Tensor<gpu, 2, DType> &w,
const Tensor<gpu, 1, DType> &label,
const Tensor<gpu, 1, DType> &x_norm,
const Tensor<gpu, 1, DType> &w_norm,
const Tensor<gpu, 2, DType> &o_grad,
const Tensor<gpu, 2, DType> &x_grad,
const Tensor<gpu, 2, DType> &w_grad,
const Tensor<gpu, 2, DType> &workspace,
const Tensor<gpu, 1, DType> &k_table,
const Tensor<gpu, 1, DType> &c_table,
const int margin,
const DType beta) {
cuda::LSoftmaxBackward(x, w, label, x_norm, w_norm, o_grad, x_grad, w_grad, workspace,
k_table, c_table, margin, beta);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(LSoftmaxParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new LSoftmaxOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
//==============================================================================
// (next source file)
//==============================================================================
* Kernel utilities for loading tiles of data through global memory
* with cache modifiers
******************************************************************************/
#pragma once
#include "../../util/operators.cuh"
#include "../../util/vector_types.cuh"
#include "../../util/io/modified_load.cuh"
B40C_NS_PREFIX
namespace b40c {
namespace util {
namespace io {
/**
* Load a tile of items
*/
template <
int LOG_LOADS_PER_TILE, // Number of vector loads (log)
int LOG_LOAD_VEC_SIZE, // Number of items per vector load (log)
int ACTIVE_THREADS, // Active threads that will be loading
ld::CacheModifier CACHE_MODIFIER, // Cache modifier (e.g., CA/CG/CS/NONE/etc.)
bool CHECK_ALIGNMENT> // Whether or not to check alignment to see if vector loads can be used
struct LoadTile
{
enum {
LOADS_PER_TILE = 1 << LOG_LOADS_PER_TILE,
LOAD_VEC_SIZE = 1 << LOG_LOAD_VEC_SIZE,
LOG_ELEMENTS_PER_THREAD = LOG_LOADS_PER_TILE + LOG_LOAD_VEC_SIZE,
ELEMENTS_PER_THREAD = 1 << LOG_ELEMENTS_PER_THREAD,
TILE_SIZE = ACTIVE_THREADS * ELEMENTS_PER_THREAD,
};
//---------------------------------------------------------------------
// Iteration Structures
//---------------------------------------------------------------------
template <int LOAD, int VEC, int dummy = 0> struct Iterate;
/**
* First vec element of a vector-load
*/
template <int LOAD, int dummy>
struct Iterate<LOAD, 0, dummy>
{
// Vector (unguarded)
template <typename T, void Transform(T&), typename VectorType>
static __device__ __forceinline__ void LoadVector(
T data[][LOAD_VEC_SIZE],
VectorType vectors[],
VectorType *d_in_vectors)
{
ModifiedLoad<CACHE_MODIFIER>::Ld(vectors[LOAD], d_in_vectors);
Transform(data[LOAD][0]); // Apply transform function with in_bounds = true
Iterate<LOAD, 1>::template LoadVector<T, Transform>(
data, vectors, d_in_vectors);
}
// Regular (unguarded)
template <typename T, void Transform(T&)>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in)
{
int thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + 0;
ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][0], d_in + thread_offset);
Transform(data[LOAD][0]);
Iterate<LOAD, 1>::template LoadValid<T, Transform>(
data, d_in);
}
// Regular (guarded)
template <typename T, void Transform(T&), typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
const SizeT &guarded_elements)
{
SizeT thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + 0;
if (thread_offset < guarded_elements) {
ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][0], d_in + thread_offset);
Transform(data[LOAD][0]);
}
Iterate<LOAD, 1>::template LoadValid<T, Transform>(
data, d_in, guarded_elements);
}
// Regular (guarded with out-of-bounds default)
template <typename T, void Transform(T&), typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T oob_default,
T *d_in,
const SizeT &guarded_elements)
{
SizeT thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + 0;
if (thread_offset < guarded_elements) {
ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][0], d_in + thread_offset);
Transform(data[LOAD][0]);
} else {
data[LOAD][0] = oob_default;
}
Iterate<LOAD, 1>::template LoadValid<T, Transform>(
data, oob_default, d_in, guarded_elements);
}
};
/**
* Next vec element of a vector-load
*/
template <int LOAD, int VEC, int dummy>
struct Iterate
{
// Vector (unguarded)
template <typename T, void Transform(T&), typename VectorType>
static __device__ __forceinline__ void LoadVector(
T data[][LOAD_VEC_SIZE],
VectorType vectors[],
VectorType *d_in_vectors)
{
Transform(data[LOAD][VEC]);
Iterate<LOAD, VEC + 1>::template LoadVector<T, Transform>(
data, vectors, d_in_vectors);
}
// Regular (unguarded)
template <typename T, void Transform(T&)>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in)
{
int thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + VEC;
ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][VEC], d_in + thread_offset);
Transform(data[LOAD][VEC]);
Iterate<LOAD, VEC + 1>::template LoadValid<T, Transform>(
data, d_in);
}
// Regular (guarded)
template <typename T, void Transform(T&), typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
const SizeT &guarded_elements)
{
SizeT thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + VEC;
if (thread_offset < guarded_elements) {
ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][VEC], d_in + thread_offset);
Transform(data[LOAD][VEC]);
}
Iterate<LOAD, VEC + 1>::template LoadValid<T, Transform>(
data, d_in, guarded_elements);
}
// Regular (guarded with out-of-bounds default)
template <typename T, void Transform(T&), typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T oob_default,
T *d_in,
const SizeT &guarded_elements)
{
SizeT thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + VEC;
if (thread_offset < guarded_elements) {
ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][VEC], d_in + thread_offset);
Transform(data[LOAD][VEC]);
} else {
data[LOAD][VEC] = oob_default;
}
Iterate<LOAD, VEC + 1>::template LoadValid<T, Transform>(
data, oob_default, d_in, guarded_elements);
}
};
/**
* Next load
*/
template <int LOAD, int dummy>
struct Iterate<LOAD, LOAD_VEC_SIZE, dummy>
{
// Vector (unguarded)
template <typename T, void Transform(T&), typename VectorType>
static __device__ __forceinline__ void LoadVector(
T data[][LOAD_VEC_SIZE],
VectorType vectors[],
VectorType *d_in_vectors)
{
Iterate<LOAD + 1, 0>::template LoadVector<T, Transform>(
data, vectors, d_in_vectors + ACTIVE_THREADS);
}
// Regular (unguarded)
template <typename T, void Transform(T&)>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in)
{
Iterate<LOAD + 1, 0>::template LoadValid<T, Transform>(
data, d_in);
}
// Regular (guarded)
template <typename T, void Transform(T&), typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
const SizeT &guarded_elements)
{
Iterate<LOAD + 1, 0>::template LoadValid<T, Transform>(
data, d_in, guarded_elements);
}
// Regular (guarded with out-of-bounds default)
template <typename T, void Transform(T&), typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T oob_default,
T *d_in,
const SizeT &guarded_elements)
{
Iterate<LOAD + 1, 0>::template LoadValid<T, Transform>(
data, oob_default, d_in, guarded_elements);
}
};
/**
* Terminate
*/
template <int dummy>
struct Iterate<LOADS_PER_TILE, 0, dummy>
{
// Vector (unguarded)
template <typename T, void Transform(T&), typename VectorType>
static __device__ __forceinline__ void LoadVector(
T data[][LOAD_VEC_SIZE],
VectorType vectors[],
VectorType *d_in_vectors) {}
// Regular (unguarded)
template <typename T, void Transform(T&)>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in) {}
// Regular (guarded)
template <typename T, void Transform(T&), typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
const SizeT &guarded_elements) {}
// Regular (guarded with out-of-bounds default)
template <typename T, void Transform(T&), typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T oob_default,
T *d_in,
const SizeT &guarded_elements) {}
};
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Load a full tile with transform
*/
template <
typename T,
void Transform(T&),
typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
SizeT cta_offset)
{
const size_t MASK = ((sizeof(T) * 8 * LOAD_VEC_SIZE) - 1);
if ((CHECK_ALIGNMENT) && (LOAD_VEC_SIZE > 1) && (((size_t) d_in) & MASK)) {
Iterate<0, 0>::template LoadValid<T, Transform>(
data, d_in + cta_offset);
} else {
// Use an aliased pointer to keys array to perform built-in vector loads
typedef typename VecType<T, LOAD_VEC_SIZE>::Type VectorType;
VectorType *vectors = (VectorType *) data;
VectorType *d_in_vectors = (VectorType *) (d_in + cta_offset + (threadIdx.x << LOG_LOAD_VEC_SIZE));
Iterate<0, 0>::template LoadVector<T, Transform>(
data, vectors, d_in_vectors);
}
}
/**
* Load a full tile
*/
template <
typename T,
typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
SizeT cta_offset)
{
LoadValid<T, Operators<T>::NopTransform>(data, d_in, cta_offset);
}
/**
* Load guarded_elements of a tile with transform and out-of-bounds default
*/
template <
typename T,
void Transform(T&),
typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
SizeT cta_offset,
const SizeT &guarded_elements,
T oob_default)
{
if (guarded_elements >= TILE_SIZE) {
LoadValid<T, Transform>(data, d_in, cta_offset);
} else {
Iterate<0, 0>::template LoadValid<T, Transform>(
data, oob_default, d_in + cta_offset, guarded_elements);
}
}
/**
* Load guarded_elements of a tile with transform
*/
template <
typename T,
void Transform(T&),
typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
SizeT cta_offset,
const SizeT &guarded_elements)
{
if (guarded_elements >= TILE_SIZE) {
LoadValid<T, Transform>(data, d_in, cta_offset);
} else {
Iterate<0, 0>::template LoadValid<T, Transform>(
data, d_in + cta_offset, guarded_elements);
}
}
/**
* Load guarded_elements of a tile and out_of_bounds default
*/
template <
typename T,
typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
SizeT cta_offset,
const SizeT &guarded_elements,
T oob_default)
{
LoadValid<T, Operators<T>::NopTransform>(
data, d_in, cta_offset, guarded_elements, oob_default);
}
/**
* Load guarded_elements of a tile
*/
template <
typename T,
typename SizeT>
static __device__ __forceinline__ void LoadValid(
T data[][LOAD_VEC_SIZE],
T *d_in,
SizeT cta_offset,
const SizeT &guarded_elements)
{
LoadValid<T, Operators<T>::NopTransform, int>(
data, d_in, cta_offset, guarded_elements);
}
};
} // namespace io
} // namespace util
} // namespace b40c
B40C_NS_POSTFIX
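//------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original b40c sources). It assumes
// the cache-modifier enumerator is spelled ld::NONE and that B40C_NS_PREFIX does
// not wrap the library in an additional outer namespace; adjust names as needed.
//------------------------------------------------------------------------------
template <typename T>
__global__ void ExampleLoadTileKernel(T *d_in, T *d_out, int num_elements)
{
    // 2^1 loads per tile, 2^2 items per load, 128 active threads => 1024 items per tile.
    typedef b40c::util::io::LoadTile<
        1,                              // LOG_LOADS_PER_TILE
        2,                              // LOG_LOAD_VEC_SIZE
        128,                            // ACTIVE_THREADS
        b40c::util::io::ld::NONE,       // CACHE_MODIFIER (assumed enumerator name)
        true> Tile;                     // CHECK_ALIGNMENT

    T data[Tile::LOADS_PER_TILE][Tile::LOAD_VEC_SIZE];
    int cta_offset = blockIdx.x * Tile::TILE_SIZE;
    int guarded_elements = num_elements - cta_offset;

    // Guarded load: threads whose offsets fall past num_elements skip their items.
    Tile::LoadValid(data, d_in, cta_offset, guarded_elements);

    // Write the tile back out with plain stores, mirroring the per-thread offsets
    // used by LoadValid above.
    #pragma unroll
    for (int load = 0; load < Tile::LOADS_PER_TILE; load++) {
        #pragma unroll
        for (int vec = 0; vec < Tile::LOAD_VEC_SIZE; vec++) {
            int offset = cta_offset + (threadIdx.x << 2 /* LOG_LOAD_VEC_SIZE */) +
                (load * 128 * Tile::LOAD_VEC_SIZE) + vec;
            if (offset < num_elements) d_out[offset] = data[load][vec];
        }
    }
}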
//==============================================================================
// (next source file)
//==============================================================================
#include <cstdio>
#include <cstdint>
#include <cassert>
#include "cuda_runtime.h"
#include "cudabsp.h"
#include "bsp_shared.h"
#include "cudamatrix.h"
#include "cudautils.h"
//static inline __device__ __host__ void print_face(
// BSP::DFace* pFace,
// size_t faceIndex
// ) {
//
// //char* c = reinterpret_cast<char*>(pFace);
//
// printf(
// "Face %u: \n"
// //"\t first 8 bytes: %x %x %x %x %x %x %x %x\n"
// "\t addr: %p\n"
// "\t firstEdge addr: %p\n"
// "\t planeNum: %u\n"
// "\t side: %u\n"
// "\t onNode: %u\n"
// "\t firstEdge: %d\n"
// "\t numEdges: %d\n"
// "\t texInfo: %d\n"
// "\t dispInfo: %d\n"
// "\t surfaceFogVolumeID: %d\n"
// "\t styles: %x, %x, %x, %x\n"
// "\t lightOffset: %d\n"
// "\t area: %f\n"
// "\t mins: (%d, %d)\n"
// "\t size: %d x %d\n"
// "\t origFace: %d\n"
// "\t numPrims: %u\n"
// "\t firstPrimID: %u\n"
// "\t smoothingGroups: %x\n",
// static_cast<unsigned int>(faceIndex),
// //static_cast<int>(c[0]),
// //static_cast<int>(c[1]),
// //static_cast<int>(c[2]),
// //static_cast<int>(c[3]),
// //static_cast<int>(c[4]),
// //static_cast<int>(c[5]),
// //static_cast<int>(c[6]),
// //static_cast<int>(c[7]),
// pFace,
// &pFace->firstEdge,
// static_cast<unsigned int>(pFace->planeNum),
// static_cast<unsigned int>(pFace->side),
// static_cast<unsigned int>(pFace->onNode),
// static_cast<int>(pFace->firstEdge),
// static_cast<int>(pFace->numEdges),
// static_cast<int>(pFace->texInfo),
// static_cast<int>(pFace->dispInfo),
// static_cast<int>(pFace->surfaceFogVolumeID),
// static_cast<int>(pFace->styles[0]),
// static_cast<int>(pFace->styles[1]),
// static_cast<int>(pFace->styles[2]),
// static_cast<int>(pFace->styles[3]),
// static_cast<int>(pFace->lightOffset),
// pFace->area,
// static_cast<int>(pFace->lightmapTextureMinsInLuxels[0]),
// static_cast<int>(pFace->lightmapTextureMinsInLuxels[1]),
// static_cast<int>(pFace->lightmapTextureSizeInLuxels[0]),
// static_cast<int>(pFace->lightmapTextureSizeInLuxels[1]),
// static_cast<int>(pFace->origFace),
// static_cast<unsigned int>(pFace->numPrims),
// static_cast<unsigned int>(pFace->firstPrimID),
// static_cast<int>(pFace->smoothingGroups)
// );
//}
namespace CUDABSP {
__device__ BSP::RGBExp32 rgbexp32_from_float3(float3 color) {
const float EPSILON = 1e-3f;
if ((EPSILON <= color.x && color.x < 1.0f)
&& (EPSILON <= color.y && color.y < 1.0f)
&& (EPSILON <= color.z && color.z < 1.0f)) {
int8_t exp = 0;
while ((EPSILON <= color.x && color.x < 1.0f)
|| (EPSILON <= color.y && color.y < 1.0f)
|| (EPSILON <= color.z && color.z < 1.0f)) {
color *= 2.0f;
exp--;
}
return BSP::RGBExp32 {
static_cast<uint8_t>(color.x),
static_cast<uint8_t>(color.y),
static_cast<uint8_t>(color.z),
exp,
};
}
else {
uint64_t r = static_cast<uint64_t>(color.x);
uint64_t g = static_cast<uint64_t>(color.y);
uint64_t b = static_cast<uint64_t>(color.z);
int8_t exp = 0;
while (r > 255 || g > 255 || b > 255) {
r >>= 1;
g >>= 1;
b >>= 1;
exp++;
}
return BSP::RGBExp32 {
static_cast<uint8_t>(r),
static_cast<uint8_t>(g),
static_cast<uint8_t>(b),
exp
};
}
}
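// Worked example (not in the original source): the linear colour (0.25, 0.5, 0.75)
// takes the first branch, is doubled twice to (1, 2, 3) with exp = -2, and decodes
// back as byte * 2^exp = (0.25, 0.5, 0.75). Colours that do not have all components
// in [EPSILON, 1) take the second branch and are halved until they fit in a byte,
// with exp counting the right-shifts.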
__device__ int16_t cluster_for_pos(
const CUDABSP& cudaBSP, float3 pos
) {
return BSPShared::cluster_for_pos(
cudaBSP.planes, cudaBSP.nodes, cudaBSP.leaves,
pos
);
}
__device__ uint8_t* pvs_for_pos(const CUDABSP& cudaBSP, float3 pos) {
int16_t cluster = cluster_for_pos(cudaBSP, pos);
if (cluster == -1) {
// Failed to find a cluster; return a null PVS, which callers treat as "everything visible".
return nullptr;
}
size_t numClusters = cudaBSP.numVisClusters;
size_t pvsRowSize = sizeof(uint8_t) * div_ceil(numClusters, 8);
return cudaBSP.visMatrix + pvsRowSize * cluster;
}
__device__ bool cluster_in_pvs(
int16_t cluster, uint8_t* pvs, size_t numClusters
) {
if (pvs == nullptr || static_cast<size_t>(cluster) >= numClusters) {
// If we don't have a valid PVS, assume we're outside the world
// and therefore can see everything.
return true;
}
size_t byteIndex = cluster / 8;
size_t bitIndex = cluster % 8;
uint8_t byte = pvs[byteIndex];
return ((byte >> bitIndex) & 0x1) != 0x0;
}
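/*
* Added note (not in the original source): the visibility matrix referenced
* above is stored row-major as numVisClusters rows of
* div_ceil(numVisClusters, 8) bytes each. Row c is the PVS for cluster c,
* and bit (i % 8) of byte (i / 8) within that row says whether cluster i is
* potentially visible from cluster c, which is exactly the lookup that
* cluster_in_pvs() performs on the row returned by pvs_for_pos().
*/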
__global__ void map_lightsamples(
float3* lightSamples,
BSP::RGBExp32* rgbExp32LightSamples,
size_t numLightSamples
) {
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numLightSamples) {
return;
}
float3 sample = lightSamples[index];
rgbExp32LightSamples[index] = rgbexp32_from_float3(sample);
}
CUDABSP* make_cudabsp(const BSP::BSP& bsp) {
CUDABSP cudaBSP;
// To detect corruption.
cudaBSP.tag = TAG;
/* Compute the sizes of all the necessary arrays. */
cudaBSP.numModels = bsp.get_models().size();
cudaBSP.numPlanes = bsp.get_planes().size();
cudaBSP.numVertices = bsp.get_vertices().size();
cudaBSP.numEdges = bsp.get_edges().size();
cudaBSP.numSurfEdges = bsp.get_surfedges().size();
cudaBSP.numFaces = bsp.get_dfaces().size();
cudaBSP.numLightSamples = bsp.get_lightsamples().size();
cudaBSP.numTexInfos = bsp.get_texinfos().size();
cudaBSP.numTexDatas = bsp.get_texdatas().size();
cudaBSP.numNodes = bsp.get_nodes().size();
cudaBSP.numLeaves = bsp.get_leaves().size();
cudaBSP.numAmbientLightSamples = bsp.get_ambient_samples().size();
cudaBSP.numWorldLights = bsp.get_worldlights().size();
cudaBSP.numVisClusters = bsp.get_visibility().size();
size_t modelsSize = sizeof(BSP::DModel) * cudaBSP.numModels;
size_t planesSize = sizeof(BSP::DPlane) * cudaBSP.numPlanes;
size_t verticesSize = sizeof(float3) * cudaBSP.numVertices;
size_t edgesSize = sizeof(BSP::DEdge) * cudaBSP.numEdges;
size_t surfEdgesSize = sizeof(int32_t) * cudaBSP.numSurfEdges;
size_t facesSize = sizeof(BSP::DFace) * cudaBSP.numFaces;
size_t lightSamplesSize
= sizeof(float3) * cudaBSP.numLightSamples;
size_t rgbExp32LightSamplesSize
= sizeof(BSP::RGBExp32) * cudaBSP.numLightSamples;
size_t texInfosSize = sizeof(BSP::TexInfo) * cudaBSP.numTexInfos;
size_t texDatasSize = sizeof(BSP::DTexData) * cudaBSP.numTexDatas;
size_t nodesSize = sizeof(BSP::DNode) * cudaBSP.numNodes;
size_t leavesSize = sizeof(BSP::DLeaf) * cudaBSP.numLeaves;
size_t ambientIndicesSize
= sizeof(BSP::DLeafAmbientIndex) * cudaBSP.numLeaves;
size_t ambientLightSamplesSize
= sizeof(BSP::DLeafAmbientLighting)
* cudaBSP.numAmbientLightSamples;
size_t worldLightsSize
= sizeof(BSP::DWorldLight) * cudaBSP.numWorldLights;
size_t visMatrixSize = sizeof(uint8_t)
* div_ceil(cudaBSP.numVisClusters, 8)
* cudaBSP.numVisClusters;
/* Copy the BSP's data to device memory. */
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.models, modelsSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.models, bsp.get_models().data(), modelsSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.planes, planesSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.planes, bsp.get_planes().data(), planesSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.vertices, verticesSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.vertices, bsp.get_vertices().data(),
verticesSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.edges, edgesSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.edges, bsp.get_edges().data(),
edgesSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.surfEdges, surfEdgesSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.surfEdges, bsp.get_surfedges().data(),
surfEdgesSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.faces, facesSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.faces, bsp.get_dfaces().data(),
facesSize,
cudaMemcpyHostToDevice
)
);
/* Special routine for st/xyz matrices */
std::vector<CUDAMatrix::CUDAMatrix<double, 3, 3>> xyzMatrices;
size_t xyzMatricesSize
= sizeof(CUDAMatrix::CUDAMatrix<double, 3, 3>) * cudaBSP.numFaces;
for (const BSP::Face& face : bsp.get_faces()) {
const gmtl::Matrix<double, 3, 3>& xyzMatrix
= face.get_st_xyz_matrix();
xyzMatrices.push_back(CUDAMatrix::CUDAMatrix<double, 3, 3>());
for (int row=0; row<3; row++) {
for (int col=0; col<3; col++) {
xyzMatrices.back()[row][col] = xyzMatrix[row][col];
}
}
}
assert(
xyzMatricesSize ==
xyzMatrices.size() * sizeof(CUDAMatrix::CUDAMatrix<double, 3, 3>)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.xyzMatrices, xyzMatricesSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.xyzMatrices, xyzMatrices.data(),
xyzMatricesSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(
cudaMalloc(
&cudaBSP.lightSamples,
lightSamplesSize
)
);
// Don't need to copy light samples since we're computing them.
CUDA_CHECK_ERROR(
cudaMalloc(
&cudaBSP.rgbExp32LightSamples,
rgbExp32LightSamplesSize
)
);
// Likewise, no copy is needed for the RGBExp32 samples; they are outputs only.
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.texInfos, texInfosSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.texInfos, bsp.get_texinfos().data(),
texInfosSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.texDatas, texDatasSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.texDatas, bsp.get_texdatas().data(),
texDatasSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.nodes, nodesSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.nodes, bsp.get_nodes().data(),
nodesSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.leaves, leavesSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.leaves, bsp.get_leaves().data(),
leavesSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(
cudaMalloc(&cudaBSP.ambientIndices, ambientIndicesSize)
);
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.ambientIndices, bsp.get_ambient_indices().data(),
ambientIndicesSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(
cudaMalloc(&cudaBSP.ambientLightSamples, ambientLightSamplesSize)
);
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.ambientLightSamples, bsp.get_ambient_samples().data(),
ambientLightSamplesSize,
cudaMemcpyHostToDevice
)
);
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.worldLights, worldLightsSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.worldLights, bsp.get_worldlights().data(),
worldLightsSize,
cudaMemcpyHostToDevice
)
);
/* Flatten the visibility matrix */
std::vector<uint8_t> flattenedVisMatrix;
for (const std::vector<uint8_t>& clusterVis : bsp.get_visibility()) {
assert(clusterVis.size() == div_ceil(cudaBSP.numVisClusters, 8));
for (uint8_t pvsByte : clusterVis) {
flattenedVisMatrix.push_back(pvsByte);
}
}
CUDA_CHECK_ERROR(cudaMalloc(&cudaBSP.visMatrix, visMatrixSize));
CUDA_CHECK_ERROR(
cudaMemcpy(
cudaBSP.visMatrix, flattenedVisMatrix.data(),
visMatrixSize,
cudaMemcpyHostToDevice
)
);
/* Copy the CUDABSP structure to device memory. */
CUDABSP* pCudaBSP;
CUDA_CHECK_ERROR(cudaMalloc(&pCudaBSP, sizeof(CUDABSP)));
CUDA_CHECK_ERROR(
cudaMemcpy(
pCudaBSP, &cudaBSP, sizeof(CUDABSP),
cudaMemcpyHostToDevice
)
);
return pCudaBSP;
}
void destroy_cudabsp(CUDABSP* pCudaBSP) {
/* We need a host copy of this to access the internal pointers. */
CUDABSP cudaBSP;
CUDA_CHECK_ERROR(
cudaMemcpy(
&cudaBSP, pCudaBSP, sizeof(CUDABSP),
cudaMemcpyDeviceToHost
)
);
/* Free the internal arrays. */
CUDA_CHECK_ERROR(cudaFree(cudaBSP.models));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.planes));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.vertices));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.edges));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.surfEdges));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.faces));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.xyzMatrices));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.lightSamples));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.rgbExp32LightSamples));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.texInfos));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.texDatas));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.nodes));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.leaves));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.ambientIndices));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.ambientLightSamples));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.worldLights));
CUDA_CHECK_ERROR(cudaFree(cudaBSP.visMatrix));
/* Free the device pointer itself. */
CUDA_CHECK_ERROR(cudaFree(pCudaBSP));
}
void convert_lightsamples(CUDABSP* pCudaBSP) {
CUDABSP cudaBSP;
CUDA_CHECK_ERROR(
cudaMemcpy(
&cudaBSP, pCudaBSP, sizeof(CUDABSP),
cudaMemcpyDeviceToHost
)
);
const size_t BLOCK_WIDTH = 1024;
size_t numBlocks = div_ceil(cudaBSP.numLightSamples, BLOCK_WIDTH);
KERNEL_LAUNCH(
map_lightsamples,
numBlocks, BLOCK_WIDTH,
cudaBSP.lightSamples, cudaBSP.rgbExp32LightSamples,
cudaBSP.numLightSamples
);
CUDA_CHECK_ERROR(cudaDeviceSynchronize());
}
void update_bsp(BSP::BSP& bsp, CUDABSP* pCudaBSP) {
CUDABSP cudaBSP;
CUDA_CHECK_ERROR(
cudaMemcpy(
&cudaBSP, pCudaBSP, sizeof(CUDABSP),
cudaMemcpyDeviceToHost
)
);
CUDA_CHECK_ERROR(
cudaMemcpy(
bsp.get_lightsamples().data(), cudaBSP.rgbExp32LightSamples,
sizeof(BSP::RGBExp32) * cudaBSP.numLightSamples,
cudaMemcpyDeviceToHost
)
);
CUDA_CHECK_ERROR(
cudaMemcpy(
bsp.get_dfaces().data(), cudaBSP.faces,
sizeof(BSP::DFace) * cudaBSP.numFaces,
cudaMemcpyDeviceToHost
)
);
CUDA_CHECK_ERROR(
cudaMemcpy(
bsp.get_ambient_samples().data(), cudaBSP.ambientLightSamples,
sizeof(BSP::DLeafAmbientLighting)
* cudaBSP.numAmbientLightSamples,
cudaMemcpyDeviceToHost
)
);
}
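/*
* A minimal host-side usage sketch (added note, not in the original source;
* the BSP loading call is a hypothetical placeholder):
*
*     BSP::BSP bsp = load_bsp("map.bsp");                  // hypothetical loader
*     CUDABSP::CUDABSP* pCudaBSP = CUDABSP::make_cudabsp(bsp);
*     // ... launch lighting kernels that fill the device lightSamples array ...
*     CUDABSP::convert_lightsamples(pCudaBSP);             // float3 -> RGBExp32
*     CUDABSP::update_bsp(bsp, pCudaBSP);                  // copy results back into the BSP
*     CUDABSP::destroy_cudabsp(pCudaBSP);                  // release device memory
*/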
}
|
the_stack
|
#include "cudautils.h"
#include "cudaSiftD.h"
#include "cudaSift.h"
///////////////////////////////////////////////////////////////////////////////
// Kernel configuration
///////////////////////////////////////////////////////////////////////////////
__constant__ int d_MaxNumPoints;
__device__ unsigned int d_PointCounter[8*2+1];
__constant__ float d_ScaleDownKernel[5];
__constant__ float d_LowPassKernel[2*LOWPASS_R+1];
__constant__ float d_LaplaceKernel[8*12*16];
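// Added note (not in the original source): d_PointCounter appears to hold a
// pair of running totals per octave, indexed as d_PointCounter[2*octave + 0]
// (points after extremum detection) and d_PointCounter[2*octave + 1] (points
// after orientation assignment, which may duplicate a keypoint that has a
// second dominant orientation). d_PointCounter[2*octave - 1] is then the
// previous octave's final count, i.e. the first index this octave may write.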
///////////////////////////////////////////////////////////////////////////////
// Lowpass filter and subsample image
///////////////////////////////////////////////////////////////////////////////
__global__ void ScaleDownDenseShift(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
#define BW (SCALEDOWN_W+4)
#define BH (SCALEDOWN_H+4)
#define W2 (SCALEDOWN_W/2)
#define H2 (SCALEDOWN_H/2)
__shared__ float brows[BH*BW];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*SCALEDOWN_W + tx;
const int yp = blockIdx.y*SCALEDOWN_H + ty;
const float k0 = d_ScaleDownKernel[0];
const float k1 = d_ScaleDownKernel[1];
const float k2 = d_ScaleDownKernel[2];
const int xl = min(width-1, max(0, xp-2));
const int yl = min(height-1, max(0, yp-2));
if (xp<(width+4) && yp<(height+4)) {
float v = d_Data[yl*pitch + xl];
brows[BW*ty + tx] = k0*(v + ShiftDown(v, 4)) + k1*(ShiftDown(v, 1) + ShiftDown(v, 3)) + k2*ShiftDown(v, 2);
}
__syncthreads();
const int xs = blockIdx.x*W2 + tx;
const int ys = blockIdx.y*H2 + ty;
if (tx<W2 && ty<H2 && xs<(width/2) && ys<(height/2)) {
float *ptr = &brows[BW*(ty*2) + (tx*2)];
d_Result[ys*newpitch + xs] = k0*(ptr[0] + ptr[4*BW]) + k1*(ptr[1*BW] + ptr[3*BW]) + k2*ptr[2*BW];
}
}
__global__ void ScaleDownDense(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
#define BW (SCALEDOWN_W+4)
#define BH (SCALEDOWN_H+4)
#define W2 (SCALEDOWN_W/2)
#define H2 (SCALEDOWN_H/2)
__shared__ float irows[BH*BW];
__shared__ float brows[BH*W2];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*SCALEDOWN_W + tx;
const int yp = blockIdx.y*SCALEDOWN_H + ty;
const int xl = min(width-1, max(0, xp-2));
const int yl = min(height-1, max(0, yp-2));
const float k0 = d_ScaleDownKernel[0];
const float k1 = d_ScaleDownKernel[1];
const float k2 = d_ScaleDownKernel[2];
if (xp<(width+4) && yp<(height+4))
irows[BW*ty + tx] = d_Data[yl*pitch + xl];
__syncthreads();
if (yp<(height+4) && tx<W2) {
float *ptr = &irows[BW*ty + 2*tx];
brows[W2*ty + tx] = k0*(ptr[0] + ptr[4]) + k1*(ptr[1] + ptr[3]) + k2*ptr[2];
}
__syncthreads();
const int xs = blockIdx.x*W2 + tx;
const int ys = blockIdx.y*H2 + ty;
if (tx<W2 && ty<H2 && xs<(width/2) && ys<(height/2)) {
float *ptr = &brows[W2*(ty*2) + tx];
d_Result[ys*newpitch + xs] = k0*(ptr[0] + ptr[4*W2]) + k1*(ptr[1*W2] + ptr[3*W2]) + k2*ptr[2*W2];
}
}
__global__ void ScaleDown(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
__shared__ float inrow[SCALEDOWN_W+4];
__shared__ float brow[5*(SCALEDOWN_W/2)];
__shared__ int yRead[SCALEDOWN_H+4];
__shared__ int yWrite[SCALEDOWN_H+4];
#define dx2 (SCALEDOWN_W/2)
const int tx = threadIdx.x;
const int tx0 = tx + 0*dx2;
const int tx1 = tx + 1*dx2;
const int tx2 = tx + 2*dx2;
const int tx3 = tx + 3*dx2;
const int tx4 = tx + 4*dx2;
const int xStart = blockIdx.x*SCALEDOWN_W;
const int yStart = blockIdx.y*SCALEDOWN_H;
const int xWrite = xStart/2 + tx;
float k0 = d_ScaleDownKernel[0];
float k1 = d_ScaleDownKernel[1];
float k2 = d_ScaleDownKernel[2];
if (tx<SCALEDOWN_H+4) {
int y = yStart + tx - 2;
y = (y<0 ? 0 : y);
y = (y>=height ? height-1 : y);
yRead[tx] = y*pitch;
yWrite[tx] = (yStart + tx - 4)/2 * newpitch;
}
__syncthreads();
int xRead = xStart + tx - 2;
xRead = (xRead<0 ? 0 : xRead);
xRead = (xRead>=width ? width-1 : xRead);
int maxtx = min(dx2, width/2 - xStart/2);
for (int dy=0;dy<SCALEDOWN_H+4;dy+=5) {
{
inrow[tx] = d_Data[yRead[dy+0] + xRead];
__syncthreads();
if (tx<maxtx) {
brow[tx4] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2];
if (dy>=4 && !(dy&1))
d_Result[yWrite[dy+0] + xWrite] = k2*brow[tx2] + k0*(brow[tx0]+brow[tx4]) + k1*(brow[tx1]+brow[tx3]);
}
__syncthreads();
}
if (dy<(SCALEDOWN_H+3)) {
inrow[tx] = d_Data[yRead[dy+1] + xRead];
__syncthreads();
if (tx<maxtx) {
brow[tx0] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2];
if (dy>=3 && (dy&1))
d_Result[yWrite[dy+1] + xWrite] = k2*brow[tx3] + k0*(brow[tx1]+brow[tx0]) + k1*(brow[tx2]+brow[tx4]);
}
__syncthreads();
}
if (dy<(SCALEDOWN_H+2)) {
inrow[tx] = d_Data[yRead[dy+2] + xRead];
__syncthreads();
if (tx<maxtx) {
brow[tx1] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2];
if (dy>=2 && !(dy&1))
d_Result[yWrite[dy+2] + xWrite] = k2*brow[tx4] + k0*(brow[tx2]+brow[tx1]) + k1*(brow[tx3]+brow[tx0]);
}
__syncthreads();
}
if (dy<(SCALEDOWN_H+1)) {
inrow[tx] = d_Data[yRead[dy+3] + xRead];
__syncthreads();
if (tx<maxtx) {
brow[tx2] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2];
if (dy>=1 && (dy&1))
d_Result[yWrite[dy+3] + xWrite] = k2*brow[tx0] + k0*(brow[tx3]+brow[tx2]) + k1*(brow[tx4]+brow[tx1]);
}
__syncthreads();
}
if (dy<SCALEDOWN_H) {
inrow[tx] = d_Data[yRead[dy+4] + xRead];
__syncthreads();
if (tx<dx2 && xWrite<width/2) {
brow[tx3] = k0*(inrow[2*tx]+inrow[2*tx+4]) + k1*(inrow[2*tx+1]+inrow[2*tx+3]) + k2*inrow[2*tx+2];
if (!(dy&1))
d_Result[yWrite[dy+4] + xWrite] = k2*brow[tx1] + k0*(brow[tx4]+brow[tx3]) + k1*(brow[tx0]+brow[tx2]);
}
__syncthreads();
}
}
}
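// A serial reference sketch (not part of the original source) of what the
// ScaleDown* kernels above compute: separable 5-tap low-pass filtering with
// the symmetric kernel {k0, k1, k2, k1, k0} held in d_ScaleDownKernel,
// followed by 2x subsampling, with clamp-to-edge boundary handling. The
// function is host-only and illustrative; k[0..2] are the same three weights.
static void ScaleDownReferenceSketch(float *dst, const float *src, int width,
int pitch, int height, int newpitch, const float k[3])
{
for (int y = 0; y < height/2; y++) {
for (int x = 0; x < width/2; x++) {
float sum = 0.0f;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
int sx = 2*x + dx;                     // clamp reads to the image border
if (sx < 0) sx = 0;
if (sx > width - 1) sx = width - 1;
int sy = 2*y + dy;
if (sy < 0) sy = 0;
if (sy > height - 1) sy = height - 1;
int wx = (dx < 0 ? 2 + dx : 2 - dx);   // weight index: 0 for |d|=2, 2 for d=0
int wy = (dy < 0 ? 2 + dy : 2 - dy);
sum += k[wy]*k[wx]*src[sy*pitch + sx];
}
}
dst[y*newpitch + x] = sum;
}
}
}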
__global__ void ScaleUp(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int x = blockIdx.x*SCALEUP_W + 2*tx;
int y = blockIdx.y*SCALEUP_H + 2*ty;
if (x<2*width && y<2*height) {
int xl = blockIdx.x*(SCALEUP_W/2) + tx;
int yu = blockIdx.y*(SCALEUP_H/2) + ty;
int xr = min(xl + 1, width - 1);
int yd = min(yu + 1, height - 1);
float vul = d_Data[yu*pitch + xl];
float vur = d_Data[yu*pitch + xr];
float vdl = d_Data[yd*pitch + xl];
float vdr = d_Data[yd*pitch + xr];
d_Result[(y + 0)*newpitch + x + 0] = vul;
d_Result[(y + 0)*newpitch + x + 1] = 0.50f*(vul + vur);
d_Result[(y + 1)*newpitch + x + 0] = 0.50f*(vul + vdl);
d_Result[(y + 1)*newpitch + x + 1] = 0.25f*(vul + vur + vdl + vdr);
}
}
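// Added note (not in the original source): ScaleUp doubles the image size by
// writing each source pixel to the even output position and filling the odd
// positions with the average of the two (horizontally), two (vertically) or
// four (diagonally) nearest source pixels, i.e. simple bilinear 2x
// upsampling with clamp-to-edge handling on the right and bottom borders.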
__global__ void ExtractSiftDescriptors(cudaTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling)
{
__shared__ float gauss[16];
__shared__ float buffer[128];
__shared__ float sums[4];
const int tx = threadIdx.x; // 0 -> 16
const int ty = threadIdx.y; // 0 -> 8
const int idx = ty*16 + tx;
const int bx = blockIdx.x + fstPts; // 0 -> numPts
if (ty==0)
gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f);
buffer[idx] = 0.0f;
__syncthreads();
// Compute angles and gradients
float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation;
float sina = sinf(theta); // cosa -sina
float cosa = cosf(theta); // sina cosa
float scale = 12.0f/16.0f*d_sift[bx].scale;
float ssina = scale*sina;
float scosa = scale*cosa;
for (int y=ty;y<16;y+=8) {
float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f;
float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f;
float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) -
tex2D<float>(texObj, xpos-cosa, ypos-sina);
float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) -
tex2D<float>(texObj, xpos+sina, ypos-cosa);
float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy);
float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f;
int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins
float horf = (tx - 1.5f)/4.0f - hori;
float ihorf = 1.0f - horf;
int veri = (y + 2)/4 - 1;
float verf = (y - 1.5f)/4.0f - veri;
float iverf = 1.0f - verf;
int angi = angf;
int angp = (angi<7 ? angi+1 : 0);
angf -= angi;
float iangf = 1.0f - angf;
int hist = 8*(4*veri + hori); // Each gradient measure is interpolated
int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores
int p2 = angp + hist;
if (tx>=2) {
float grad1 = ihorf*grad;
if (y>=2) { // Upper left
float grad2 = iverf*grad1;
atomicAdd(buffer + p1, iangf*grad2);
atomicAdd(buffer + p2, angf*grad2);
}
if (y<=13) { // Lower left
float grad2 = verf*grad1;
atomicAdd(buffer + p1+32, iangf*grad2);
atomicAdd(buffer + p2+32, angf*grad2);
}
}
if (tx<=13) {
float grad1 = horf*grad;
if (y>=2) { // Upper right
float grad2 = iverf*grad1;
atomicAdd(buffer + p1+8, iangf*grad2);
atomicAdd(buffer + p2+8, angf*grad2);
}
if (y<=13) { // Lower right
float grad2 = verf*grad1;
atomicAdd(buffer + p1+40, iangf*grad2);
atomicAdd(buffer + p2+40, angf*grad2);
}
}
}
__syncthreads();
// Normalize twice and suppress peaks first time
float sum = buffer[idx]*buffer[idx];
for (int i=16;i>0;i/=2)
sum += ShiftDown(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum1 = sums[0] + sums[1] + sums[2] + sums[3];
tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f);
sum = tsum1*tsum1;
for (int i=16;i>0;i/=2)
sum += ShiftDown(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum2 = sums[0] + sums[1] + sums[2] + sums[3];
float *desc = d_sift[bx].data;
desc[idx] = tsum1 * rsqrtf(tsum2);
if (idx==0) {
d_sift[bx].xpos *= subsampling;
d_sift[bx].ypos *= subsampling;
d_sift[bx].scale *= subsampling;
}
}
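// A serial sketch (not part of the original source) of the descriptor
// normalization performed above with warp shuffles: L2-normalize the 128-bin
// histogram, clamp every bin at 0.2 to suppress dominant gradients, then
// L2-normalize again. Written here for a plain array so the parallel
// reduction in the kernel is easier to follow.
static __device__ __forceinline__ void NormalizeDescriptorSketch(float desc[128])
{
float sum = 0.0f;
for (int i = 0; i < 128; i++)
sum += desc[i]*desc[i];
float inv = rsqrtf(sum);
sum = 0.0f;
for (int i = 0; i < 128; i++) {
float v = fminf(desc[i]*inv, 0.2f);    // suppress large peaks
desc[i] = v;
sum += v*v;
}
inv = rsqrtf(sum);
for (int i = 0; i < 128; i++)
desc[i] *= inv;
}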
__device__ float FastAtan2(float y, float x)
{
float absx = abs(x);
float absy = abs(y);
float a = __fdiv_rn(min(absx, absy), max(absx, absy));
float s = a*a;
float r = ((-0.0464964749f*s + 0.15931422f)*s - 0.327622764f)*s*a + a;
r = (absy>absx ? 1.57079637f - r : r);
r = (x<0 ? 3.14159274f - r : r);
r = (y<0 ? -r : r);
return r;
}
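// Added note (not in the original source): FastAtan2 evaluates a cubic
// polynomial approximation of atan(a) for a = min(|x|,|y|)/max(|x|,|y|) in
// [0, 1], roughly atan(a) ~= a + a*s*(-0.3276 + s*(0.1593 - 0.0465*s)) with
// s = a*a, and then folds the result back to the correct octant. The absolute
// error is on the order of a few milliradians, far below the 45-degree bin
// width of the descriptor histograms, which is why the cheaper approximation
// is acceptable here.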
__global__ void ExtractSiftDescriptorsCONSTNew(cudaTextureObject_t texObj, SiftPoint *d_sift, float subsampling, int octave)
{
__shared__ float gauss[16];
__shared__ float buffer[128];
__shared__ float sums[4];
const int tx = threadIdx.x; // 0 -> 16
const int ty = threadIdx.y; // 0 -> 8
const int idx = ty*16 + tx;
if (ty==0)
gauss[tx] = __expf(-(tx-7.5f)*(tx-7.5f)/128.0f);
int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints);
int totPts = min(d_PointCounter[2*octave+1], d_MaxNumPoints);
//if (tx==0 && ty==0)
// printf("%d %d %d %d\n", octave, fstPts, min(d_PointCounter[2*octave], d_MaxNumPoints), totPts);
for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) {
buffer[idx] = 0.0f;
__syncthreads();
// Compute angles and gradients
float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation;
float sina = __sinf(theta); // cosa -sina
float cosa = __cosf(theta); // sina cosa
float scale = 12.0f/16.0f*d_sift[bx].scale;
float ssina = scale*sina;
float scosa = scale*cosa;
for (int y=ty;y<16;y+=8) {
float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f;
float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f;
float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) -
tex2D<float>(texObj, xpos-cosa, ypos-sina);
float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) -
tex2D<float>(texObj, xpos+sina, ypos-cosa);
float grad = gauss[y]*gauss[tx] * __fsqrt_rn(dx*dx + dy*dy);
float angf = 4.0f/3.1415f*FastAtan2(dy, dx) + 4.0f;
int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins
float horf = (tx - 1.5f)/4.0f - hori;
float ihorf = 1.0f - horf;
int veri = (y + 2)/4 - 1;
float verf = (y - 1.5f)/4.0f - veri;
float iverf = 1.0f - verf;
int angi = angf;
int angp = (angi<7 ? angi+1 : 0);
angf -= angi;
float iangf = 1.0f - angf;
int hist = 8*(4*veri + hori); // Each gradient measure is interpolated
int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores
int p2 = angp + hist;
if (tx>=2) {
float grad1 = ihorf*grad;
if (y>=2) { // Upper left
float grad2 = iverf*grad1;
atomicAdd(buffer + p1, iangf*grad2);
atomicAdd(buffer + p2, angf*grad2);
}
if (y<=13) { // Lower left
float grad2 = verf*grad1;
atomicAdd(buffer + p1+32, iangf*grad2);
atomicAdd(buffer + p2+32, angf*grad2);
}
}
if (tx<=13) {
float grad1 = horf*grad;
if (y>=2) { // Upper right
float grad2 = iverf*grad1;
atomicAdd(buffer + p1+8, iangf*grad2);
atomicAdd(buffer + p2+8, angf*grad2);
}
if (y<=13) { // Lower right
float grad2 = verf*grad1;
atomicAdd(buffer + p1+40, iangf*grad2);
atomicAdd(buffer + p2+40, angf*grad2);
}
}
}
__syncthreads();
// Normalize twice and suppress peaks first time
float sum = buffer[idx]*buffer[idx];
for (int i=16;i>0;i/=2)
sum += ShiftDown(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum1 = sums[0] + sums[1] + sums[2] + sums[3];
tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f);
sum = tsum1*tsum1;
for (int i=16;i>0;i/=2)
sum += ShiftDown(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum2 = sums[0] + sums[1] + sums[2] + sums[3];
float *desc = d_sift[bx].data;
desc[idx] = tsum1 * rsqrtf(tsum2);
if (idx==0) {
d_sift[bx].xpos *= subsampling;
d_sift[bx].ypos *= subsampling;
d_sift[bx].scale *= subsampling;
}
__syncthreads();
}
}
__global__ void ExtractSiftDescriptorsCONST(cudaTextureObject_t texObj, SiftPoint *d_sift, float subsampling, int octave)
{
__shared__ float gauss[16];
__shared__ float buffer[128];
__shared__ float sums[4];
const int tx = threadIdx.x; // 0 -> 16
const int ty = threadIdx.y; // 0 -> 8
const int idx = ty*16 + tx;
if (ty==0)
gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f);
int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints);
int totPts = min(d_PointCounter[2*octave+1], d_MaxNumPoints);
//if (tx==0 && ty==0)
// printf("%d %d %d %d\n", octave, fstPts, min(d_PointCounter[2*octave], d_MaxNumPoints), totPts);
for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) {
buffer[idx] = 0.0f;
__syncthreads();
// Compute angles and gradients
float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation;
float sina = sinf(theta); // cosa -sina
float cosa = cosf(theta); // sina cosa
float scale = 12.0f/16.0f*d_sift[bx].scale;
float ssina = scale*sina;
float scosa = scale*cosa;
for (int y=ty;y<16;y+=8) {
float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f;
float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f;
float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) -
tex2D<float>(texObj, xpos-cosa, ypos-sina);
float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) -
tex2D<float>(texObj, xpos+sina, ypos-cosa);
float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy);
float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f;
int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins
float horf = (tx - 1.5f)/4.0f - hori;
float ihorf = 1.0f - horf;
int veri = (y + 2)/4 - 1;
float verf = (y - 1.5f)/4.0f - veri;
float iverf = 1.0f - verf;
int angi = angf;
int angp = (angi<7 ? angi+1 : 0);
angf -= angi;
float iangf = 1.0f - angf;
int hist = 8*(4*veri + hori); // Each gradient measure is interpolated
int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores
int p2 = angp + hist;
if (tx>=2) {
float grad1 = ihorf*grad;
if (y>=2) { // Upper left
float grad2 = iverf*grad1;
atomicAdd(buffer + p1, iangf*grad2);
atomicAdd(buffer + p2, angf*grad2);
}
if (y<=13) { // Lower left
float grad2 = verf*grad1;
atomicAdd(buffer + p1+32, iangf*grad2);
atomicAdd(buffer + p2+32, angf*grad2);
}
}
if (tx<=13) {
float grad1 = horf*grad;
if (y>=2) { // Upper right
float grad2 = iverf*grad1;
atomicAdd(buffer + p1+8, iangf*grad2);
atomicAdd(buffer + p2+8, angf*grad2);
}
if (y<=13) { // Lower right
float grad2 = verf*grad1;
atomicAdd(buffer + p1+40, iangf*grad2);
atomicAdd(buffer + p2+40, angf*grad2);
}
}
}
__syncthreads();
// Normalize twice and suppress peaks first time
float sum = buffer[idx]*buffer[idx];
for (int i=16;i>0;i/=2)
sum += ShiftDown(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum1 = sums[0] + sums[1] + sums[2] + sums[3];
tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f);
sum = tsum1*tsum1;
for (int i=16;i>0;i/=2)
sum += ShiftDown(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum2 = sums[0] + sums[1] + sums[2] + sums[3];
float *desc = d_sift[bx].data;
desc[idx] = tsum1 * rsqrtf(tsum2);
if (idx==0) {
d_sift[bx].xpos *= subsampling;
d_sift[bx].ypos *= subsampling;
d_sift[bx].scale *= subsampling;
}
__syncthreads();
}
}
__global__ void ExtractSiftDescriptorsOld(cudaTextureObject_t texObj, SiftPoint *d_sift, int fstPts, float subsampling)
{
__shared__ float gauss[16];
__shared__ float buffer[128];
__shared__ float sums[128];
const int tx = threadIdx.x; // 0 -> 16
const int ty = threadIdx.y; // 0 -> 8
const int idx = ty*16 + tx;
const int bx = blockIdx.x + fstPts; // 0 -> numPts
if (ty==0)
gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f);
buffer[idx] = 0.0f;
__syncthreads();
// Compute angles and gradients
float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation;
float sina = sinf(theta); // cosa -sina
float cosa = cosf(theta); // sina cosa
float scale = 12.0f/16.0f*d_sift[bx].scale;
float ssina = scale*sina;
float scosa = scale*cosa;
for (int y=ty;y<16;y+=8) {
float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f;
float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f;
float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) -
tex2D<float>(texObj, xpos-cosa, ypos-sina);
float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) -
tex2D<float>(texObj, xpos+sina, ypos-cosa);
float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy);
float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f;
int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins
float horf = (tx - 1.5f)/4.0f - hori;
float ihorf = 1.0f - horf;
int veri = (y + 2)/4 - 1;
float verf = (y - 1.5f)/4.0f - veri;
float iverf = 1.0f - verf;
int angi = angf;
int angp = (angi<7 ? angi+1 : 0);
angf -= angi;
float iangf = 1.0f - angf;
int hist = 8*(4*veri + hori); // Each gradient measure is interpolated
int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores
int p2 = angp + hist;
if (tx>=2) {
float grad1 = ihorf*grad;
if (y>=2) { // Upper left
float grad2 = iverf*grad1;
atomicAdd(buffer + p1, iangf*grad2);
atomicAdd(buffer + p2, angf*grad2);
}
if (y<=13) { // Lower left
float grad2 = verf*grad1;
atomicAdd(buffer + p1+32, iangf*grad2);
atomicAdd(buffer + p2+32, angf*grad2);
}
}
if (tx<=13) {
float grad1 = horf*grad;
if (y>=2) { // Upper right
float grad2 = iverf*grad1;
atomicAdd(buffer + p1+8, iangf*grad2);
atomicAdd(buffer + p2+8, angf*grad2);
}
if (y<=13) { // Lower right
float grad2 = verf*grad1;
atomicAdd(buffer + p1+40, iangf*grad2);
atomicAdd(buffer + p2+40, angf*grad2);
}
}
}
__syncthreads();
// Normalize twice and suppress peaks first time
if (idx<64)
sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64];
__syncthreads();
if (idx<32) sums[idx] = sums[idx] + sums[idx+32];
__syncthreads();
if (idx<16) sums[idx] = sums[idx] + sums[idx+16];
__syncthreads();
if (idx<8) sums[idx] = sums[idx] + sums[idx+8];
__syncthreads();
if (idx<4) sums[idx] = sums[idx] + sums[idx+4];
__syncthreads();
float tsum1 = sums[0] + sums[1] + sums[2] + sums[3];
buffer[idx] = buffer[idx] * rsqrtf(tsum1);
if (buffer[idx]>0.2f)
buffer[idx] = 0.2f;
__syncthreads();
if (idx<64)
sums[idx] = buffer[idx]*buffer[idx] + buffer[idx+64]*buffer[idx+64];
__syncthreads();
if (idx<32) sums[idx] = sums[idx] + sums[idx+32];
__syncthreads();
if (idx<16) sums[idx] = sums[idx] + sums[idx+16];
__syncthreads();
if (idx<8) sums[idx] = sums[idx] + sums[idx+8];
__syncthreads();
if (idx<4) sums[idx] = sums[idx] + sums[idx+4];
__syncthreads();
float tsum2 = sums[0] + sums[1] + sums[2] + sums[3];
float *desc = d_sift[bx].data;
desc[idx] = buffer[idx] * rsqrtf(tsum2);
if (idx==0) {
d_sift[bx].xpos *= subsampling;
d_sift[bx].ypos *= subsampling;
d_sift[bx].scale *= subsampling;
}
}
__device__ void ExtractSiftDescriptor(cudaTextureObject_t texObj, SiftPoint *d_sift, float subsampling, int octave, int bx)
{
__shared__ float gauss[16];
__shared__ float buffer[128];
__shared__ float sums[4];
const int idx = threadIdx.x;
const int tx = idx & 15; // 0 -> 16
const int ty = idx / 16; // 0 -> 8
if (ty==0)
gauss[tx] = exp(-(tx-7.5f)*(tx-7.5f)/128.0f);
buffer[idx] = 0.0f;
__syncthreads();
// Compute angles and gradients
float theta = 2.0f*3.1415f/360.0f*d_sift[bx].orientation;
float sina = sinf(theta); // cosa -sina
float cosa = cosf(theta); // sina cosa
float scale = 12.0f/16.0f*d_sift[bx].scale;
float ssina = scale*sina;
float scosa = scale*cosa;
for (int y=ty;y<16;y+=8) {
float xpos = d_sift[bx].xpos + (tx-7.5f)*scosa - (y-7.5f)*ssina + 0.5f;
float ypos = d_sift[bx].ypos + (tx-7.5f)*ssina + (y-7.5f)*scosa + 0.5f;
float dx = tex2D<float>(texObj, xpos+cosa, ypos+sina) -
tex2D<float>(texObj, xpos-cosa, ypos-sina);
float dy = tex2D<float>(texObj, xpos-sina, ypos+cosa) -
tex2D<float>(texObj, xpos+sina, ypos-cosa);
float grad = gauss[y]*gauss[tx] * sqrtf(dx*dx + dy*dy);
float angf = 4.0f/3.1415f*atan2f(dy, dx) + 4.0f;
int hori = (tx + 2)/4 - 1; // Convert from (tx,y,angle) to bins
float horf = (tx - 1.5f)/4.0f - hori;
float ihorf = 1.0f - horf;
int veri = (y + 2)/4 - 1;
float verf = (y - 1.5f)/4.0f - veri;
float iverf = 1.0f - verf;
int angi = angf;
int angp = (angi<7 ? angi+1 : 0);
angf -= angi;
float iangf = 1.0f - angf;
int hist = 8*(4*veri + hori); // Each gradient measure is interpolated
int p1 = angi + hist; // in angles, xpos and ypos -> 8 stores
int p2 = angp + hist;
if (tx>=2) {
float grad1 = ihorf*grad;
if (y>=2) { // Upper left
float grad2 = iverf*grad1;
atomicAdd(buffer + p1, iangf*grad2);
atomicAdd(buffer + p2, angf*grad2);
}
if (y<=13) { // Lower left
float grad2 = verf*grad1;
atomicAdd(buffer + p1+32, iangf*grad2);
atomicAdd(buffer + p2+32, angf*grad2);
}
}
if (tx<=13) {
float grad1 = horf*grad;
if (y>=2) { // Upper right
float grad2 = iverf*grad1;
atomicAdd(buffer + p1+8, iangf*grad2);
atomicAdd(buffer + p2+8, angf*grad2);
}
if (y<=13) { // Lower right
float grad2 = verf*grad1;
atomicAdd(buffer + p1+40, iangf*grad2);
atomicAdd(buffer + p2+40, angf*grad2);
}
}
}
__syncthreads();
// Normalize twice and suppress peaks first time
float sum = buffer[idx]*buffer[idx];
for (int i=16;i>0;i/=2)
sum += ShiftDown(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum1 = sums[0] + sums[1] + sums[2] + sums[3];
tsum1 = min(buffer[idx] * rsqrtf(tsum1), 0.2f);
sum = tsum1*tsum1;
for (int i=16;i>0;i/=2)
sum += ShiftDown(sum, i);
if ((idx&31)==0)
sums[idx/32] = sum;
__syncthreads();
float tsum2 = sums[0] + sums[1] + sums[2] + sums[3];
float *desc = d_sift[bx].data;
desc[idx] = tsum1 * rsqrtf(tsum2);
if (idx==0) {
d_sift[bx].xpos *= subsampling;
d_sift[bx].ypos *= subsampling;
d_sift[bx].scale *= subsampling;
}
__syncthreads();
}
__global__ void RescalePositions(SiftPoint *d_sift, int numPts, float scale)
{
int num = blockIdx.x*blockDim.x + threadIdx.x;
if (num<numPts) {
d_sift[num].xpos *= scale;
d_sift[num].ypos *= scale;
d_sift[num].scale *= scale;
}
}
__global__ void ComputeOrientations(cudaTextureObject_t texObj, SiftPoint *d_Sift, int fstPts)
{
__shared__ float hist[64];
__shared__ float gauss[11];
const int tx = threadIdx.x;
const int bx = blockIdx.x + fstPts;
float i2sigma2 = -1.0f/(4.5f*d_Sift[bx].scale*d_Sift[bx].scale);
if (tx<11)
gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5));
if (tx<64)
hist[tx] = 0.0f;
__syncthreads();
float xp = d_Sift[bx].xpos - 4.5f;
float yp = d_Sift[bx].ypos - 4.5f;
int yd = tx/11;
int xd = tx - yd*11;
float xf = xp + xd;
float yf = yp + yd;
if (yd<11) {
float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf);
float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0);
int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f;
if (bin>31)
bin = 0;
float grad = sqrtf(dx*dx + dy*dy);
atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]);
}
__syncthreads();
int x1m = (tx>=1 ? tx-1 : tx+31);
int x1p = (tx<=30 ? tx+1 : tx-31);
if (tx<32) {
int x2m = (tx>=2 ? tx-2 : tx+30);
int x2p = (tx<=29 ? tx+2 : tx-30);
hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]);
}
__syncthreads();
if (tx<32) {
float v = hist[32+tx];
hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f);
}
__syncthreads();
if (tx==0) {
float maxval1 = 0.0;
float maxval2 = 0.0;
int i1 = -1;
int i2 = -1;
for (int i=0;i<32;i++) {
float v = hist[i];
if (v>maxval1) {
maxval2 = maxval1;
maxval1 = v;
i2 = i1;
i1 = i;
} else if (v>maxval2) {
maxval2 = v;
i2 = i;
}
}
float val1 = hist[32+((i1+1)&31)];
float val2 = hist[32+((i1+31)&31)];
float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2);
d_Sift[bx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
if (maxval2>0.8f*maxval1) {
float val1 = hist[32+((i2+1)&31)];
float val2 = hist[32+((i2+31)&31)];
float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2);
unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
if (idx<d_MaxNumPoints) {
d_Sift[idx].xpos = d_Sift[bx].xpos;
d_Sift[idx].ypos = d_Sift[bx].ypos;
d_Sift[idx].scale = d_Sift[bx].scale;
d_Sift[idx].sharpness = d_Sift[bx].sharpness;
d_Sift[idx].edgeness = d_Sift[bx].edgeness;
d_Sift[idx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
d_Sift[idx].subsampling = d_Sift[bx].subsampling;
}
}
}
}
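// A stand-alone sketch (not part of the original source) of the three-point
// parabolic peak interpolation used above: with l = hist[i-1], c = hist[i],
// r = hist[i+1] and c a local maximum, the refined peak lies
// 0.5*(r - l)/(2*c - r - l) bins away from i, which is exactly the "peak"
// expression computed in ComputeOrientations.
static __device__ __forceinline__ float InterpolatePeakOffsetSketch(float l, float c, float r)
{
return 0.5f*(r - l) / (2.0f*c - r - l);
}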
// With constant number of blocks
__global__ void ComputeOrientationsCONSTNew(float *image, int w, int p, int h, SiftPoint *d_Sift, int octave)
{
#define RAD 9
#define WID (2*RAD + 1)
#define LEN 32 //%%%% Note: Lowe suggests 36, not 32
__shared__ float img[WID][WID], tmp[WID][WID];
__shared__ float hist[2*LEN];
__shared__ float gaussx[WID], gaussy[WID];
const int tx = threadIdx.x;
int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints);
int totPts = min(d_PointCounter[2*octave+0], d_MaxNumPoints);
for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) {
float sc = d_Sift[bx].scale;
for (int i=tx;i<2*LEN;i+=blockDim.x)
hist[i] = 0.0f;
float xp = d_Sift[bx].xpos;
float yp = d_Sift[bx].ypos;
int xi = (int)xp;
int yi = (int)yp;
float xf = xp - xi;
float yf = yp - yi;
for (int i=tx;i<WID*WID;i+=blockDim.x) {
int y = i/WID;
int x = i - y*WID;
int xp = max(min(x - RAD + xi, w - 1), 0);
int yp = max(min(y - RAD + yi, h - 1), 0);
img[y][x] = image[yp*p + xp];
}
float fac[5];
fac[1] = fac[3] = (sc>0.5f ? __expf(-1.0f/(2.0f*(sc*sc - 0.25f))) : 0.0f);
fac[0] = fac[4] = (sc>0.5f ? __expf(-4.0f/(2.0f*(sc*sc - 0.25f))) : 0.0f);
fac[2] = 1.0f;
float i2sigma2 = -1.0f/(2.0f*2.0f*2.0f*sc*sc); //%%%% Note: Lowe suggests 1.5, not 2.0
if (tx<WID) {
gaussx[tx] = __expf(i2sigma2*(tx-RAD-xf)*(tx-RAD-xf));
gaussy[tx] = __expf(i2sigma2*(tx-RAD-yf)*(tx-RAD-yf));
}
__syncthreads();
for (int i=tx;i<(WID-4)*WID;i+=blockDim.x) {
int y = i/WID;
int x = i - y*WID;
y += 2;
tmp[y][x] = img[y][x] + fac[1]*(img[y-1][x] + img[y+1][x]) +
fac[0]*(img[y-2][x] + img[y+2][x]);
}
__syncthreads();
for (int i=tx;i<(WID-4)*(WID-4);i+=blockDim.x) {
int y = i/(WID-4);
int x = i - y*(WID-4);
x += 2;
y += 2;
img[y][x] = tmp[y][x] + fac[1]*(tmp[y][x-1] + tmp[y][x+1]) +
fac[0]*(tmp[y][x-2] + tmp[y][x+2]);
}
__syncthreads();
for (int i=tx;i<(WID-6)*(WID-6);i+=blockDim.x) {
int y = i/(WID-6);
int x = i - y*(WID-6);
x += 3;
y += 3;
float dx = img[y][x+1] - img[y][x-1];
float dy = img[y+1][x] - img[y-1][x];
int bin = (int)((LEN/2)*atan2f(dy, dx)/3.1416f + (LEN/2) + 0.5f)%LEN;
float grad = __fsqrt_rn(dx*dx + dy*dy);
atomicAdd(&hist[LEN + bin], grad*gaussx[x]*gaussy[y]);
}
__syncthreads();
int x1m = (tx>=1 ? tx-1 : tx+LEN-1);
int x1p = (tx<(LEN-1) ? tx+1 : tx-LEN+1);
int x2m = (tx>=2 ? tx-2 : tx+LEN-2);
int x2p = (tx<(LEN-2) ? tx+2 : tx-LEN+2);
if (tx<LEN) {
hist[tx] = 6.0f*hist[tx + LEN] + 4.0f*(hist[x1m + LEN] + hist[x1p + LEN]) +
1.0f*(hist[x2m + LEN] + hist[x2p + LEN]);
hist[tx + LEN] = 8.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) +
0.0f*(hist[x2m] + hist[x2p]);
float val = hist[tx + LEN];
hist[tx] = (val>hist[x1m + LEN] && val>=hist[x1p + LEN] ? val : 0.0f);
}
__syncthreads();
if (tx==0) {
float maxval1 = 0.0;
float maxval2 = 0.0;
int i1 = -1;
int i2 = -1;
for (int i=0;i<LEN;i++) {
float v = hist[i];
if (v>maxval1) {
maxval2 = maxval1;
maxval1 = v;
i2 = i1;
i1 = i;
} else if (v>maxval2) {
maxval2 = v;
i2 = i;
}
}
float val1 = hist[LEN + ((i1 + 1)%LEN)];
float val2 = hist[LEN + ((i1 + LEN - 1)%LEN)];
float peak = i1 + 0.5f*(val1 - val2) / (2.0f*maxval1 - val1 - val2);
d_Sift[bx].orientation = 360.0f*(peak<0.0f ? peak + LEN : peak)/LEN;
atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave+0]);
if (maxval2>0.8f*maxval1) {
float val1 = hist[LEN + ((i2 + 1)%LEN)];
float val2 = hist[LEN + ((i2 + LEN - 1)%LEN)];
float peak = i2 + 0.5f*(val1 - val2) / (2.0f*maxval2 - val1 - val2);
unsigned int idx = atomicInc(&d_PointCounter[2*octave+1], 0x7fffffff);
if (idx<d_MaxNumPoints) {
d_Sift[idx].xpos = d_Sift[bx].xpos;
d_Sift[idx].ypos = d_Sift[bx].ypos;
d_Sift[idx].scale = sc;
d_Sift[idx].sharpness = d_Sift[bx].sharpness;
d_Sift[idx].edgeness = d_Sift[bx].edgeness;
d_Sift[idx].orientation = 360.0f*(peak<0.0f ? peak + LEN : peak)/LEN;
d_Sift[idx].subsampling = d_Sift[bx].subsampling;
}
}
}
}
#undef RAD
#undef WID
#undef LEN
}
// With constant number of blocks
__global__ void ComputeOrientationsCONST(cudaTextureObject_t texObj, SiftPoint *d_Sift, int octave)
{
__shared__ float hist[64];
__shared__ float gauss[11];
const int tx = threadIdx.x;
int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints);
int totPts = min(d_PointCounter[2*octave+0], d_MaxNumPoints);
for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) {
float i2sigma2 = -1.0f/(2.0f*1.5f*1.5f*d_Sift[bx].scale*d_Sift[bx].scale);
if (tx<11)
gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5));
if (tx<64)
hist[tx] = 0.0f;
__syncthreads();
float xp = d_Sift[bx].xpos - 4.5f;
float yp = d_Sift[bx].ypos - 4.5f;
int yd = tx/11;
int xd = tx - yd*11;
float xf = xp + xd;
float yf = yp + yd;
if (yd<11) {
float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf);
float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0);
int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f;
if (bin>31)
bin = 0;
float grad = sqrtf(dx*dx + dy*dy);
atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]);
}
__syncthreads();
int x1m = (tx>=1 ? tx-1 : tx+31);
int x1p = (tx<=30 ? tx+1 : tx-31);
if (tx<32) {
int x2m = (tx>=2 ? tx-2 : tx+30);
int x2p = (tx<=29 ? tx+2 : tx-30);
hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]);
}
__syncthreads();
if (tx<32) {
float v = hist[32+tx];
hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f);
}
__syncthreads();
if (tx==0) {
float maxval1 = 0.0;
float maxval2 = 0.0;
int i1 = -1;
int i2 = -1;
for (int i=0;i<32;i++) {
float v = hist[i];
if (v>maxval1) {
maxval2 = maxval1;
maxval1 = v;
i2 = i1;
i1 = i;
} else if (v>maxval2) {
maxval2 = v;
i2 = i;
}
}
float val1 = hist[32+((i1+1)&31)];
float val2 = hist[32+((i1+31)&31)];
float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2);
d_Sift[bx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave+0]);
if (maxval2>0.8f*maxval1) {
float val1 = hist[32+((i2+1)&31)];
float val2 = hist[32+((i2+31)&31)];
float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2);
unsigned int idx = atomicInc(&d_PointCounter[2*octave+1], 0x7fffffff);
if (idx<d_MaxNumPoints) {
d_Sift[idx].xpos = d_Sift[bx].xpos;
d_Sift[idx].ypos = d_Sift[bx].ypos;
d_Sift[idx].scale = d_Sift[bx].scale;
d_Sift[idx].sharpness = d_Sift[bx].sharpness;
d_Sift[idx].edgeness = d_Sift[bx].edgeness;
d_Sift[idx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
d_Sift[idx].subsampling = d_Sift[bx].subsampling;
}
}
}
__syncthreads();
}
}
// With constant number of blocks
__global__ void OrientAndExtractCONST(cudaTextureObject_t texObj, SiftPoint *d_Sift, float subsampling, int octave)
{
__shared__ float hist[64];
__shared__ float gauss[11];
__shared__ unsigned int idx; //%%%%
const int tx = threadIdx.x;
int fstPts = min(d_PointCounter[2*octave-1], d_MaxNumPoints);
int totPts = min(d_PointCounter[2*octave+0], d_MaxNumPoints);
for (int bx = blockIdx.x + fstPts; bx < totPts; bx += gridDim.x) {
float i2sigma2 = -1.0f/(4.5f*d_Sift[bx].scale*d_Sift[bx].scale);
if (tx<11)
gauss[tx] = exp(i2sigma2*(tx-5)*(tx-5));
if (tx<64)
hist[tx] = 0.0f;
__syncthreads();
float xp = d_Sift[bx].xpos - 4.5f;
float yp = d_Sift[bx].ypos - 4.5f;
int yd = tx/11;
int xd = tx - yd*11;
float xf = xp + xd;
float yf = yp + yd;
if (yd<11) {
float dx = tex2D<float>(texObj, xf+1.0, yf) - tex2D<float>(texObj, xf-1.0, yf);
float dy = tex2D<float>(texObj, xf, yf+1.0) - tex2D<float>(texObj, xf, yf-1.0);
int bin = 16.0f*atan2f(dy, dx)/3.1416f + 16.5f;
if (bin>31)
bin = 0;
float grad = sqrtf(dx*dx + dy*dy);
atomicAdd(&hist[bin], grad*gauss[xd]*gauss[yd]);
}
__syncthreads();
int x1m = (tx>=1 ? tx-1 : tx+31);
int x1p = (tx<=30 ? tx+1 : tx-31);
if (tx<32) {
int x2m = (tx>=2 ? tx-2 : tx+30);
int x2p = (tx<=29 ? tx+2 : tx-30);
hist[tx+32] = 6.0f*hist[tx] + 4.0f*(hist[x1m] + hist[x1p]) + (hist[x2m] + hist[x2p]);
}
__syncthreads();
if (tx<32) {
float v = hist[32+tx];
hist[tx] = (v>hist[32+x1m] && v>=hist[32+x1p] ? v : 0.0f);
}
__syncthreads();
if (tx==0) {
float maxval1 = 0.0;
float maxval2 = 0.0;
int i1 = -1;
int i2 = -1;
for (int i=0;i<32;i++) {
float v = hist[i];
if (v>maxval1) {
maxval2 = maxval1;
maxval1 = v;
i2 = i1;
i1 = i;
} else if (v>maxval2) {
maxval2 = v;
i2 = i;
}
}
float val1 = hist[32+((i1+1)&31)];
float val2 = hist[32+((i1+31)&31)];
float peak = i1 + 0.5f*(val1-val2) / (2.0f*maxval1-val1-val2);
d_Sift[bx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
idx = 0xffffffff; //%%%%
atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave+0]);
if (maxval2>0.8f*maxval1) {
float val1 = hist[32+((i2+1)&31)];
float val2 = hist[32+((i2+31)&31)];
float peak = i2 + 0.5f*(val1-val2) / (2.0f*maxval2-val1-val2);
idx = atomicInc(&d_PointCounter[2*octave+1], 0x7fffffff); //%%%%
if (idx<d_MaxNumPoints) {
d_Sift[idx].xpos = d_Sift[bx].xpos;
d_Sift[idx].ypos = d_Sift[bx].ypos;
d_Sift[idx].scale = d_Sift[bx].scale;
d_Sift[idx].sharpness = d_Sift[bx].sharpness;
d_Sift[idx].edgeness = d_Sift[bx].edgeness;
d_Sift[idx].orientation = 11.25f*(peak<0.0f ? peak+32.0f : peak);
d_Sift[idx].subsampling = d_Sift[bx].subsampling;
}
}
}
__syncthreads();
ExtractSiftDescriptor(texObj, d_Sift, subsampling, octave, bx); //%%%%
if (idx<d_MaxNumPoints) //%%%%
ExtractSiftDescriptor(texObj, d_Sift, subsampling, octave, idx); //%%%%
}
}
///////////////////////////////////////////////////////////////////////////////
// Find extrema in the difference-of-Gaussians scale space (multi-scale version)
///////////////////////////////////////////////////////////////////////////////
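// Added note (not in the original source): the FindPointsMulti* kernels below
// scan a stack of difference-of-Gaussians images (NUM_SCALES scales per
// octave), keep samples whose magnitude exceeds 'thresh' and that are strict
// extrema over their 3x3x3 scale-space neighborhood, and then refine each
// candidate with a quadratic fit: the 3x3 Hessian system is solved via its
// adjugate (Cramer's rule), the sub-pixel offset falls back to per-axis
// Newton steps when it leaves the +-0.5 cell, and points that fail the edge
// test tra*tra < edgeLimit*det are discarded.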
__global__ void FindPointsMultiTest(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave)
{
#define MEMWID (MINMAX_W + 2)
__shared__ unsigned int cnt;
__shared__ unsigned short points[3*MEMWID];
if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0 && threadIdx.y==0) {
atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]);
atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]);
}
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx==0 && ty==0)
cnt = 0;
__syncthreads();
int ypos = MINMAX_H*blockIdx.y + ty;
if (ypos>=height)
return;
int block = blockIdx.x/NUM_SCALES;
int scale = blockIdx.x - NUM_SCALES*block;
int minx = block*MINMAX_W;
int maxx = min(minx + MINMAX_W, width);
int xpos = minx + tx;
int size = pitch*height;
int ptr = size*scale + max(min(xpos-1, width-1), 0);
float maxv = fabs(d_Data0[ptr + ypos*pitch + 1*size]);
maxv = fmaxf(maxv, ShiftDown(maxv, 16, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 8, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 4, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 2, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 1, MINMAX_W));
if (Shuffle(maxv, 0)>thresh) {
int yptr1 = ptr + ypos*pitch;
int yptr0 = ptr + max(0,ypos-1)*pitch;
int yptr2 = ptr + min(height-1,ypos+1)*pitch;
float d20 = d_Data0[yptr0 + 1*size];
float d21 = d_Data0[yptr1 + 1*size];
float d22 = d_Data0[yptr2 + 1*size];
float d31 = d_Data0[yptr1 + 2*size];
float d11 = d_Data0[yptr1];
float d10 = d_Data0[yptr0];
float d12 = d_Data0[yptr2];
float ymin1 = fminf(fminf(d10, d11), d12);
float ymax1 = fmaxf(fmaxf(d10, d11), d12);
float d30 = d_Data0[yptr0 + 2*size];
float d32 = d_Data0[yptr2 + 2*size];
float ymin3 = fminf(fminf(d30, d31), d32);
float ymax3 = fmaxf(fmaxf(d30, d31), d32);
float ymin2 = fminf(fminf(ymin1, fminf(fminf(d20, d22), d21)), ymin3);
float ymax2 = fmaxf(fmaxf(ymax1, fmaxf(fmaxf(d20, d22), d21)), ymax3);
float nmin2 = fminf(ShiftUp(ymin2, 1), ShiftDown(ymin2, 1));
float nmax2 = fmaxf(ShiftUp(ymax2, 1), ShiftDown(ymax2, 1));
if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) {
if (d21<-thresh) {
float minv = fminf(fminf(nmin2, ymin1), ymin3);
minv = fminf(fminf(minv, d20), d22);
if (d21<minv) {
int pos = atomicInc(&cnt, MEMWID-1);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
if (d21>thresh) {
float maxv = fmaxf(fmaxf(nmax2, ymax1), ymax3);
maxv = fmaxf(fmaxf(maxv, d20), d22);
if (d21>maxv) {
int pos = atomicInc(&cnt, MEMWID-1);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
}
}
__syncthreads();
if (ty==0 && tx<cnt) {
int xpos = points[3*tx+0];
int ypos = points[3*tx+1];
int scale = points[3*tx+2];
int ptr = xpos + (ypos + (scale+1)*height)*pitch;
float val = d_Data0[ptr];
float *data1 = &d_Data0[ptr];
float dxx = 2.0f*val - data1[-1] - data1[1];
float dyy = 2.0f*val - data1[-pitch] - data1[pitch];
float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]);
float tra = dxx + dyy;
float det = dxx*dyy - dxy*dxy;
if (tra*tra<edgeLimit*det) {
float edge = __fdividef(tra*tra, det);
float dx = 0.5f*(data1[1] - data1[-1]);
float dy = 0.5f*(data1[pitch] - data1[-pitch]);
float *data0 = d_Data0 + ptr - height*pitch;
float *data2 = d_Data0 + ptr + height*pitch;
float ds = 0.5f*(data0[0] - data2[0]);
float dss = 2.0f*val - data2[0] - data0[0];
float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]);
float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]);
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) {
pdx = __fdividef(dx, dxx);
pdy = __fdividef(dy, dyy);
pds = __fdividef(ds, dss);
}
float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds);
int maxPts = d_MaxNumPoints;
float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor);
if (sc>=lowestScale) {
unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff);
idx = (idx>=maxPts ? maxPts-1 : idx);
d_Sift[idx].xpos = xpos + pdx;
d_Sift[idx].ypos = ypos + pdy;
d_Sift[idx].scale = sc;
d_Sift[idx].sharpness = val + dval;
d_Sift[idx].edgeness = edge;
d_Sift[idx].subsampling = subsampling;
}
}
}
}
__global__ void FindPointsMultiNew(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave)
{
#define MEMWID (MINMAX_W + 2)
__shared__ unsigned short points[2*MEMWID];
if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0) {
atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]);
atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]);
}
int tx = threadIdx.x;
int block = blockIdx.x/NUM_SCALES;
int scale = blockIdx.x - NUM_SCALES*block;
int minx = block*MINMAX_W;
int maxx = min(minx + MINMAX_W, width);
int xpos = minx + tx;
int size = pitch*height;
int ptr = size*scale + max(min(xpos-1, width-1), 0);
int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H);
float maxv = 0.0f;
for (int y=0;y<yloops;y++) {
int ypos = MINMAX_H*blockIdx.y + y;
int yptr1 = ptr + ypos*pitch;
float val = d_Data0[yptr1 + 1*size];
maxv = fmaxf(maxv, fabs(val));
}
//if (tx==0) printf("XXX1\n");
if (!__any_sync(0xffffffff, maxv>thresh))
return;
//if (tx==0) printf("XXX2\n");
int ptbits = 0;
for (int y=0;y<yloops;y++) {
int ypos = MINMAX_H*blockIdx.y + y;
int yptr1 = ptr + ypos*pitch;
float d11 = d_Data0[yptr1 + 1*size];
if (__any_sync(0xffffffff, fabs(d11)>thresh)) {
int yptr0 = ptr + max(0,ypos-1)*pitch;
int yptr2 = ptr + min(height-1,ypos+1)*pitch;
float d01 = d_Data0[yptr1];
float d10 = d_Data0[yptr0 + 1*size];
float d12 = d_Data0[yptr2 + 1*size];
float d21 = d_Data0[yptr1 + 2*size];
float d00 = d_Data0[yptr0];
float d02 = d_Data0[yptr2];
float ymin1 = fminf(fminf(d00, d01), d02);
float ymax1 = fmaxf(fmaxf(d00, d01), d02);
float d20 = d_Data0[yptr0 + 2*size];
float d22 = d_Data0[yptr2 + 2*size];
float ymin3 = fminf(fminf(d20, d21), d22);
float ymax3 = fmaxf(fmaxf(d20, d21), d22);
float ymin2 = fminf(fminf(ymin1, fminf(fminf(d10, d12), d11)), ymin3);
float ymax2 = fmaxf(fmaxf(ymax1, fmaxf(fmaxf(d10, d12), d11)), ymax3);
float nmin2 = fminf(ShiftUp(ymin2, 1), ShiftDown(ymin2, 1));
float nmax2 = fmaxf(ShiftUp(ymax2, 1), ShiftDown(ymax2, 1));
float minv = fminf(fminf(nmin2, ymin1), ymin3);
minv = fminf(fminf(minv, d10), d12);
float maxv = fmaxf(fmaxf(nmax2, ymax1), ymax3);
maxv = fmaxf(fmaxf(maxv, d10), d12);
if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx)
ptbits |= ((d11 < fminf(-thresh, minv)) | (d11 > fmaxf(thresh, maxv))) << y;
}
}
unsigned int totbits = __popc(ptbits);
unsigned int numbits = totbits;
for (int d=1;d<32;d<<=1) {
unsigned int num = ShiftUp(totbits, d);
if (tx >= d)
totbits += num;
}
int pos = totbits - numbits;
for (int y=0;y<yloops;y++) {
int ypos = MINMAX_H*blockIdx.y + y;
if (ptbits & (1 << y) && pos<MEMWID) {
points[2*pos + 0] = xpos - 1;
points[2*pos + 1] = ypos;
pos ++;
}
}
totbits = Shuffle(totbits, 31);
if (tx<totbits) {
int xpos = points[2*tx + 0];
int ypos = points[2*tx + 1];
int ptr = xpos + (ypos + (scale + 1)*height)*pitch;
float val = d_Data0[ptr];
float *data1 = &d_Data0[ptr];
float dxx = 2.0f*val - data1[-1] - data1[1];
float dyy = 2.0f*val - data1[-pitch] - data1[pitch];
float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]);
float tra = dxx + dyy;
float det = dxx*dyy - dxy*dxy;
if (tra*tra<edgeLimit*det) {
float edge = __fdividef(tra*tra, det);
float dx = 0.5f*(data1[1] - data1[-1]);
float dy = 0.5f*(data1[pitch] - data1[-pitch]);
float *data0 = d_Data0 + ptr - height*pitch;
float *data2 = d_Data0 + ptr + height*pitch;
float ds = 0.5f*(data0[0] - data2[0]);
float dss = 2.0f*val - data2[0] - data0[0];
float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]);
float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]);
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) {
pdx = __fdividef(dx, dxx);
pdy = __fdividef(dy, dyy);
pds = __fdividef(ds, dss);
}
float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds);
int maxPts = d_MaxNumPoints;
float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor);
if (sc>=lowestScale) {
atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]);
unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff);
idx = (idx>=maxPts ? maxPts-1 : idx);
d_Sift[idx].xpos = xpos + pdx;
d_Sift[idx].ypos = ypos + pdy;
d_Sift[idx].scale = sc;
d_Sift[idx].sharpness = val + dval;
d_Sift[idx].edgeness = edge;
d_Sift[idx].subsampling = subsampling;
}
}
}
}
__global__ void FindPointsMulti(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave)
{
#define MEMWID (MINMAX_W + 2)
__shared__ unsigned int cnt;
__shared__ unsigned short points[3*MEMWID];
if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0) {
atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]);
atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]);
}
int tx = threadIdx.x;
int block = blockIdx.x/NUM_SCALES;
int scale = blockIdx.x - NUM_SCALES*block;
int minx = block*MINMAX_W;
int maxx = min(minx + MINMAX_W, width);
int xpos = minx + tx;
int size = pitch*height;
int ptr = size*scale + max(min(xpos-1, width-1), 0);
int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H);
float maxv = 0.0f;
for (int y=0;y<yloops;y++) {
int ypos = MINMAX_H*blockIdx.y + y;
int yptr1 = ptr + ypos*pitch;
float val = d_Data0[yptr1 + 1*size];
maxv = fmaxf(maxv, fabs(val));
}
maxv = fmaxf(maxv, ShiftDown(maxv, 16, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 8, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 4, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 2, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 1, MINMAX_W));
if (Shuffle(maxv, 0)<=thresh)
return;
if (tx==0)
cnt = 0;
__syncthreads();
for (int y=0;y<yloops;y++) {
int ypos = MINMAX_H*blockIdx.y + y;
int yptr1 = ptr + ypos*pitch;
int yptr0 = ptr + max(0,ypos-1)*pitch;
int yptr2 = ptr + min(height-1,ypos+1)*pitch;
float d20 = d_Data0[yptr0 + 1*size];
float d21 = d_Data0[yptr1 + 1*size];
float d22 = d_Data0[yptr2 + 1*size];
float d31 = d_Data0[yptr1 + 2*size];
float d11 = d_Data0[yptr1];
float d10 = d_Data0[yptr0];
float d12 = d_Data0[yptr2];
float ymin1 = fminf(fminf(d10, d11), d12);
float ymax1 = fmaxf(fmaxf(d10, d11), d12);
float d30 = d_Data0[yptr0 + 2*size];
float d32 = d_Data0[yptr2 + 2*size];
float ymin3 = fminf(fminf(d30, d31), d32);
float ymax3 = fmaxf(fmaxf(d30, d31), d32);
float ymin2 = fminf(fminf(ymin1, fminf(fminf(d20, d22), d21)), ymin3);
float ymax2 = fmaxf(fmaxf(ymax1, fmaxf(fmaxf(d20, d22), d21)), ymax3);
float nmin2 = fminf(ShiftUp(ymin2, 1), ShiftDown(ymin2, 1));
float nmax2 = fmaxf(ShiftUp(ymax2, 1), ShiftDown(ymax2, 1));
if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) {
if (d21<-thresh) {
float minv = fminf(fminf(nmin2, ymin1), ymin3);
minv = fminf(fminf(minv, d20), d22);
if (d21<minv) {
int pos = atomicInc(&cnt, MEMWID-1);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
if (d21>thresh) {
float maxv = fmaxf(fmaxf(nmax2, ymax1), ymax3);
maxv = fmaxf(fmaxf(maxv, d20), d22);
if (d21>maxv) {
int pos = atomicInc(&cnt, MEMWID-1);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
}
}
if (tx<cnt) {
int xpos = points[3*tx+0];
int ypos = points[3*tx+1];
int scale = points[3*tx+2];
int ptr = xpos + (ypos + (scale+1)*height)*pitch;
float val = d_Data0[ptr];
float *data1 = &d_Data0[ptr];
float dxx = 2.0f*val - data1[-1] - data1[1];
float dyy = 2.0f*val - data1[-pitch] - data1[pitch];
float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]);
float tra = dxx + dyy;
float det = dxx*dyy - dxy*dxy;
if (tra*tra<edgeLimit*det) {
float edge = __fdividef(tra*tra, det);
float dx = 0.5f*(data1[1] - data1[-1]);
float dy = 0.5f*(data1[pitch] - data1[-pitch]);
float *data0 = d_Data0 + ptr - height*pitch;
float *data2 = d_Data0 + ptr + height*pitch;
float ds = 0.5f*(data0[0] - data2[0]);
float dss = 2.0f*val - data2[0] - data0[0];
float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]);
float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]);
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) {
pdx = __fdividef(dx, dxx);
pdy = __fdividef(dy, dyy);
pds = __fdividef(ds, dss);
}
float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds);
int maxPts = d_MaxNumPoints;
float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor);
if (sc>=lowestScale) {
atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]);
unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff);
idx = (idx>=maxPts ? maxPts-1 : idx);
d_Sift[idx].xpos = xpos + pdx;
d_Sift[idx].ypos = ypos + pdy;
d_Sift[idx].scale = sc;
d_Sift[idx].sharpness = val + dval;
d_Sift[idx].edgeness = edge;
d_Sift[idx].subsampling = subsampling;
}
}
}
}
__global__ void FindPointsMultiOld(float *d_Data0, SiftPoint *d_Sift, int width, int pitch, int height, float subsampling, float lowestScale, float thresh, float factor, float edgeLimit, int octave)
{
#define MEMWID (MINMAX_W + 2)
__shared__ float ymin1[MEMWID], ymin2[MEMWID], ymin3[MEMWID];
__shared__ float ymax1[MEMWID], ymax2[MEMWID], ymax3[MEMWID];
__shared__ unsigned int cnt;
__shared__ unsigned short points[3*MEMWID];
if (blockIdx.x==0 && blockIdx.y==0 && threadIdx.x==0) {
atomicMax(&d_PointCounter[2*octave+0], d_PointCounter[2*octave-1]);
atomicMax(&d_PointCounter[2*octave+1], d_PointCounter[2*octave-1]);
}
int tx = threadIdx.x;
int block = blockIdx.x/NUM_SCALES;
int scale = blockIdx.x - NUM_SCALES*block;
int minx = block*MINMAX_W;
int maxx = min(minx + MINMAX_W, width);
int xpos = minx + tx;
int size = pitch*height;
int ptr = size*scale + max(min(xpos-1, width-1), 0);
int yloops = min(height - MINMAX_H*blockIdx.y, MINMAX_H);
float maxv = 0.0f;
for (int y=0;y<yloops;y++) {
int ypos = MINMAX_H*blockIdx.y + y;
int yptr1 = ptr + ypos*pitch;
float val = d_Data0[yptr1 + 1*size];
maxv = fmaxf(maxv, fabs(val));
}
maxv = fmaxf(maxv, ShiftDown(maxv, 16, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 8, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 4, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 2, MINMAX_W));
maxv = fmaxf(maxv, ShiftDown(maxv, 1, MINMAX_W));
if (Shuffle(maxv, 0)<=thresh)
return;
if (tx==0)
cnt = 0;
__syncthreads();
for (int y=0;y<yloops;y++) {
int ypos = MINMAX_H*blockIdx.y + y;
int yptr1 = ptr + ypos*pitch;
int yptr0 = ptr + max(0,ypos-1)*pitch;
int yptr2 = ptr + min(height-1,ypos+1)*pitch;
float d20 = d_Data0[yptr0 + 1*size];
float d21 = d_Data0[yptr1 + 1*size];
float d22 = d_Data0[yptr2 + 1*size];
float d31 = d_Data0[yptr1 + 2*size];
float d11 = d_Data0[yptr1];
float d10 = d_Data0[yptr0];
float d12 = d_Data0[yptr2];
ymin1[tx] = fminf(fminf(d10, d11), d12);
ymax1[tx] = fmaxf(fmaxf(d10, d11), d12);
float d30 = d_Data0[yptr0 + 2*size];
float d32 = d_Data0[yptr2 + 2*size];
ymin3[tx] = fminf(fminf(d30, d31), d32);
ymax3[tx] = fmaxf(fmaxf(d30, d31), d32);
ymin2[tx] = fminf(fminf(ymin1[tx], fminf(fminf(d20, d22), d21)), ymin3[tx]);
ymax2[tx] = fmaxf(fmaxf(ymax1[tx], fmaxf(fmaxf(d20, d22), d21)), ymax3[tx]);
__syncthreads();
if (tx>0 && tx<MINMAX_W+1 && xpos<=maxx) {
if (d21<-thresh) {
float minv = fminf(fminf(fminf(ymin2[tx-1], ymin2[tx+1]), ymin1[tx]), ymin3[tx]);
minv = fminf(fminf(minv, d20), d22);
if (d21<minv) {
int pos = atomicInc(&cnt, MEMWID-1);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
if (d21>thresh) {
float maxv = fmaxf(fmaxf(fmaxf(ymax2[tx-1], ymax2[tx+1]), ymax1[tx]), ymax3[tx]);
maxv = fmaxf(fmaxf(maxv, d20), d22);
if (d21>maxv) {
int pos = atomicInc(&cnt, MEMWID-1);
points[3*pos+0] = xpos - 1;
points[3*pos+1] = ypos;
points[3*pos+2] = scale;
}
}
}
__syncthreads();
}
if (tx<cnt) {
int xpos = points[3*tx+0];
int ypos = points[3*tx+1];
int scale = points[3*tx+2];
int ptr = xpos + (ypos + (scale+1)*height)*pitch;
float val = d_Data0[ptr];
float *data1 = &d_Data0[ptr];
float dxx = 2.0f*val - data1[-1] - data1[1];
float dyy = 2.0f*val - data1[-pitch] - data1[pitch];
float dxy = 0.25f*(data1[+pitch+1] + data1[-pitch-1] - data1[-pitch+1] - data1[+pitch-1]);
float tra = dxx + dyy;
float det = dxx*dyy - dxy*dxy;
if (tra*tra<edgeLimit*det) {
float edge = __fdividef(tra*tra, det);
float dx = 0.5f*(data1[1] - data1[-1]);
float dy = 0.5f*(data1[pitch] - data1[-pitch]);
float *data0 = d_Data0 + ptr - height*pitch;
float *data2 = d_Data0 + ptr + height*pitch;
float ds = 0.5f*(data0[0] - data2[0]);
float dss = 2.0f*val - data2[0] - data0[0];
float dxs = 0.25f*(data2[1] + data0[-1] - data0[1] - data2[-1]);
float dys = 0.25f*(data2[pitch] + data0[-pitch] - data2[-pitch] - data0[pitch]);
float idxx = dyy*dss - dys*dys;
float idxy = dys*dxs - dxy*dss;
float idxs = dxy*dys - dyy*dxs;
float idet = __fdividef(1.0f, idxx*dxx + idxy*dxy + idxs*dxs);
float idyy = dxx*dss - dxs*dxs;
float idys = dxy*dxs - dxx*dys;
float idss = dxx*dyy - dxy*dxy;
float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
float pds = idet*(idxs*dx + idys*dy + idss*ds);
if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) {
pdx = __fdividef(dx, dxx);
pdy = __fdividef(dy, dyy);
pds = __fdividef(ds, dss);
}
float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds);
int maxPts = d_MaxNumPoints;
float sc = powf(2.0f, (float)scale/NUM_SCALES) * exp2f(pds*factor);
if (sc>=lowestScale) {
unsigned int idx = atomicInc(&d_PointCounter[2*octave+0], 0x7fffffff);
idx = (idx>=maxPts ? maxPts-1 : idx);
d_Sift[idx].xpos = xpos + pdx;
d_Sift[idx].ypos = ypos + pdy;
d_Sift[idx].scale = sc;
d_Sift[idx].sharpness = val + dval;
d_Sift[idx].edgeness = edge;
d_Sift[idx].subsampling = subsampling;
}
}
}
}
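// Usage sketch (not part of the original source): a plausible host-side launch
// for FindPointsMultiOld, inferred from the indexing above. blockIdx.x encodes
// both the horizontal tile and the scale, blockIdx.y the vertical tile, and the
// block needs MINMAX_W + 2 threads to cover the tile plus a one-pixel apron on
// each side. The exact launch used by the library may differ.
static void ExampleFindPointsMultiOldLaunch(float *d_Data0, SiftPoint *d_Sift,
                                            int width, int pitch, int height,
                                            float subsampling, float lowestScale,
                                            float thresh, float factor,
                                            float edgeLimit, int octave)
{
  dim3 blocks(((width + MINMAX_W - 1)/MINMAX_W)*NUM_SCALES,
              (height + MINMAX_H - 1)/MINMAX_H);
  dim3 threads(MINMAX_W + 2);
  FindPointsMultiOld<<<blocks, threads>>>(d_Data0, d_Sift, width, pitch, height,
                                          subsampling, lowestScale, thresh,
                                          factor, edgeLimit, octave);
}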
__global__ void LaplaceMultiTex(cudaTextureObject_t texObj, float *d_Result, int width, int pitch, int height, int octave)
{
__shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S];
__shared__ float data2[LAPLACE_W*LAPLACE_S];
const int tx = threadIdx.x;
const int xp = blockIdx.x*LAPLACE_W + tx;
const int yp = blockIdx.y;
const int scale = threadIdx.y;
float *kernel = d_LaplaceKernel + octave*12*16 + scale*16;
float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale;
float x = xp-3.5;
float y = yp+0.5;
sdata1[tx] = kernel[0]*tex2D<float>(texObj, x, y) +
kernel[1]*(tex2D<float>(texObj, x, y-1.0) + tex2D<float>(texObj, x, y+1.0)) +
kernel[2]*(tex2D<float>(texObj, x, y-2.0) + tex2D<float>(texObj, x, y+2.0)) +
kernel[3]*(tex2D<float>(texObj, x, y-3.0) + tex2D<float>(texObj, x, y+3.0)) +
kernel[4]*(tex2D<float>(texObj, x, y-4.0) + tex2D<float>(texObj, x, y+4.0));
__syncthreads();
float *sdata2 = data2 + LAPLACE_W*scale;
if (tx<LAPLACE_W) {
sdata2[tx] = kernel[0]*sdata1[tx+4] +
kernel[1]*(sdata1[tx+3] + sdata1[tx+5]) +
kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) +
kernel[3]*(sdata1[tx+1] + sdata1[tx+7]) +
kernel[4]*(sdata1[tx+0] + sdata1[tx+8]);
}
__syncthreads();
if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width)
d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W];
}
__global__ void LaplaceMultiMem(float *d_Image, float *d_Result, int width, int pitch, int height, int octave)
{
__shared__ float buff[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S];
const int tx = threadIdx.x;
const int xp = blockIdx.x*LAPLACE_W + tx;
const int yp = blockIdx.y;
float *data = d_Image + max(min(xp - LAPLACE_R, width-1), 0);
float temp[2*LAPLACE_R + 1], kern[LAPLACE_S][LAPLACE_R + 1];
if (xp<(width + 2*LAPLACE_R)) {
for (int i=0;i<=2*LAPLACE_R;i++)
temp[i] = data[max(0, min(yp + i - LAPLACE_R, height - 1))*pitch];
for (int scale=0;scale<LAPLACE_S;scale++) {
float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale;
float *kernel = d_LaplaceKernel + octave*12*16 + scale*16;
for (int i=0;i<=LAPLACE_R;i++)
kern[scale][i] = kernel[i];
float sum = kern[scale][0]*temp[LAPLACE_R];
#pragma unroll
for (int j=1;j<=LAPLACE_R;j++)
sum += kern[scale][j]*(temp[LAPLACE_R - j] + temp[LAPLACE_R + j]);
buf[tx] = sum;
}
}
__syncthreads();
if (tx<LAPLACE_W && xp<width) {
int scale = 0;
float oldRes = kern[scale][0]*buff[tx + LAPLACE_R];
#pragma unroll
for (int j=1;j<=LAPLACE_R;j++)
oldRes += kern[scale][j]*(buff[tx + LAPLACE_R - j] + buff[tx + LAPLACE_R + j]);
for (int scale=1;scale<LAPLACE_S;scale++) {
float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale;
float res = kern[scale][0]*buf[tx + LAPLACE_R];
#pragma unroll
for (int j=1;j<=LAPLACE_R;j++)
res += kern[scale][j]*(buf[tx + LAPLACE_R - j] + buf[tx + LAPLACE_R + j]);
d_Result[(scale-1)*height*pitch + yp*pitch + xp] = res - oldRes;
oldRes = res;
}
}
}
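// Usage sketch (not part of the original source): a plausible launch for
// LaplaceMultiMem, inferred from the indexing above. Each block handles one image
// row and one LAPLACE_W-wide tile, with LAPLACE_W + 2*LAPLACE_R threads covering
// the filter apron; all LAPLACE_S scales are processed inside the kernel. The
// exact launch used by the library may differ.
static void ExampleLaplaceMultiMemLaunch(float *d_Image, float *d_Result,
                                         int width, int pitch, int height,
                                         int octave)
{
  dim3 blocks((width + LAPLACE_W - 1)/LAPLACE_W, height);
  dim3 threads(LAPLACE_W + 2*LAPLACE_R);
  LaplaceMultiMem<<<blocks, threads>>>(d_Image, d_Result, width, pitch, height,
                                       octave);
}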
__global__ void LaplaceMultiMemWide(float *d_Image, float *d_Result, int width, int pitch, int height, int octave)
{
__shared__ float buff[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S];
const int tx = threadIdx.x;
const int xp = blockIdx.x*LAPLACE_W + tx;
const int xp4 = blockIdx.x*LAPLACE_W + 4*tx;
const int yp = blockIdx.y;
float kern[LAPLACE_S][LAPLACE_R+1];
float *data = d_Image + max(min(xp - 4, width-1), 0);
float temp[9];
if (xp<(width + 2*LAPLACE_R)) {
for (int i=0;i<4;i++)
temp[i] = data[max(0, min(yp+i-4, height-1))*pitch];
for (int i=4;i<8+1;i++)
temp[i] = data[min(yp+i-4, height-1)*pitch];
for (int scale=0;scale<LAPLACE_S;scale++) {
float *kernel = d_LaplaceKernel + octave*12*16 + scale*16;
for (int i=0;i<=LAPLACE_R;i++)
kern[scale][i] = kernel[LAPLACE_R - i];
float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale;
buf[tx] = kern[scale][4]*temp[4] +
kern[scale][3]*(temp[3] + temp[5]) + kern[scale][2]*(temp[2] + temp[6]) +
kern[scale][1]*(temp[1] + temp[7]) + kern[scale][0]*(temp[0] + temp[8]);
}
}
__syncthreads();
if (tx<LAPLACE_W/4 && xp4<width) {
float4 b0 = reinterpret_cast<float4*>(buff)[tx+0];
float4 b1 = reinterpret_cast<float4*>(buff)[tx+1];
float4 b2 = reinterpret_cast<float4*>(buff)[tx+2];
float4 old4, new4, dif4;
old4.x = kern[0][4]*b1.x + kern[0][3]*(b0.w + b1.y) + kern[0][2]*(b0.z + b1.z) +
kern[0][1]*(b0.y + b1.w) + kern[0][0]*(b0.x + b2.x);
old4.y = kern[0][4]*b1.y + kern[0][3]*(b1.x + b1.z) + kern[0][2]*(b0.w + b1.w) +
kern[0][1]*(b0.z + b2.x) + kern[0][0]*(b0.y + b2.y);
old4.z = kern[0][4]*b1.z + kern[0][3]*(b1.y + b1.w) + kern[0][2]*(b1.x + b2.x) +
kern[0][1]*(b0.w + b2.y) + kern[0][0]*(b0.z + b2.z);
old4.w = kern[0][4]*b1.w + kern[0][3]*(b1.z + b2.x) + kern[0][2]*(b1.y + b2.y) +
kern[0][1]*(b1.x + b2.z) + kern[0][0]*(b0.w + b2.w);
for (int scale=1;scale<LAPLACE_S;scale++) {
float *buf = buff + (LAPLACE_W + 2*LAPLACE_R)*scale;
float4 b0 = reinterpret_cast<float4*>(buf)[tx+0];
float4 b1 = reinterpret_cast<float4*>(buf)[tx+1];
float4 b2 = reinterpret_cast<float4*>(buf)[tx+2];
new4.x = kern[scale][4]*b1.x + kern[scale][3]*(b0.w + b1.y) +
kern[scale][2]*(b0.z + b1.z) + kern[scale][1]*(b0.y + b1.w) +
kern[scale][0]*(b0.x + b2.x);
new4.y = kern[scale][4]*b1.y + kern[scale][3]*(b1.x + b1.z) +
kern[scale][2]*(b0.w + b1.w) + kern[scale][1]*(b0.z + b2.x) +
kern[scale][0]*(b0.y + b2.y);
new4.z = kern[scale][4]*b1.z + kern[scale][3]*(b1.y + b1.w) +
kern[scale][2]*(b1.x + b2.x) + kern[scale][1]*(b0.w + b2.y) +
kern[scale][0]*(b0.z + b2.z);
new4.w = kern[scale][4]*b1.w + kern[scale][3]*(b1.z + b2.x) +
kern[scale][2]*(b1.y + b2.y) + kern[scale][1]*(b1.x + b2.z) +
kern[scale][0]*(b0.w + b2.w);
dif4.x = new4.x - old4.x;
dif4.y = new4.y - old4.y;
dif4.z = new4.z - old4.z;
dif4.w = new4.w - old4.w;
reinterpret_cast<float4*>(&d_Result[(scale-1)*height*pitch + yp*pitch + xp4])[0] = dif4;
old4 = new4;
}
}
}
__global__ void LaplaceMultiMemTest(float *d_Image, float *d_Result, int width, int pitch, int height, int octave)
{
__shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S];
__shared__ float data2[LAPLACE_W*LAPLACE_S];
const int tx = threadIdx.x;
const int xp = blockIdx.x*LAPLACE_W + tx;
const int yp = LAPLACE_H*blockIdx.y;
const int scale = threadIdx.y;
float *kernel = d_LaplaceKernel + octave*12*16 + scale*16;
float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale;
float *data = d_Image + max(min(xp - 4, width-1), 0);
int h = height-1;
float temp[8+LAPLACE_H], kern[LAPLACE_R+1];
for (int i=0;i<4;i++)
temp[i] = data[max(0, min(yp+i-4, h))*pitch];
for (int i=4;i<8+LAPLACE_H;i++)
temp[i] = data[min(yp+i-4, h)*pitch];
for (int i=0;i<=LAPLACE_R;i++)
kern[i] = kernel[LAPLACE_R - i];
for (int j=0;j<LAPLACE_H;j++) {
sdata1[tx] = kern[4]*temp[4+j] +
kern[3]*(temp[3+j] + temp[5+j]) + kern[2]*(temp[2+j] + temp[6+j]) +
kern[1]*(temp[1+j] + temp[7+j]) + kern[0]*(temp[0+j] + temp[8+j]);
__syncthreads();
float *sdata2 = data2 + LAPLACE_W*scale;
if (tx<LAPLACE_W) {
sdata2[tx] = kern[4]*sdata1[tx+4] +
kern[3]*(sdata1[tx+3] + sdata1[tx+5]) + kern[2]*(sdata1[tx+2] + sdata1[tx+6]) +
kern[1]*(sdata1[tx+1] + sdata1[tx+7]) + kern[0]*(sdata1[tx+0] + sdata1[tx+8]);
}
__syncthreads();
if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width && (yp+j)<height)
d_Result[scale*height*pitch + (yp+j)*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W];
}
}
__global__ void LaplaceMultiMemOld(float *d_Image, float *d_Result, int width, int pitch, int height, int octave)
{
__shared__ float data1[(LAPLACE_W + 2*LAPLACE_R)*LAPLACE_S];
__shared__ float data2[LAPLACE_W*LAPLACE_S];
const int tx = threadIdx.x;
const int xp = blockIdx.x*LAPLACE_W + tx;
const int yp = blockIdx.y;
const int scale = threadIdx.y;
float *kernel = d_LaplaceKernel + octave*12*16 + scale*16;
float *sdata1 = data1 + (LAPLACE_W + 2*LAPLACE_R)*scale;
float *data = d_Image + max(min(xp - 4, width-1), 0);
int h = height-1;
sdata1[tx] = kernel[0]*data[min(yp, h)*pitch] +
kernel[1]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) +
kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) +
kernel[3]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) +
kernel[4]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]);
__syncthreads();
float *sdata2 = data2 + LAPLACE_W*scale;
if (tx<LAPLACE_W) {
sdata2[tx] = kernel[0]*sdata1[tx+4] +
kernel[1]*(sdata1[tx+3] + sdata1[tx+5]) +
kernel[2]*(sdata1[tx+2] + sdata1[tx+6]) +
kernel[3]*(sdata1[tx+1] + sdata1[tx+7]) +
kernel[4]*(sdata1[tx+0] + sdata1[tx+8]);
}
__syncthreads();
if (tx<LAPLACE_W && scale<LAPLACE_S-1 && xp<width)
d_Result[scale*height*pitch + yp*pitch + xp] = sdata2[tx] - sdata2[tx+LAPLACE_W];
}
__global__ void LowPass(float *d_Image, float *d_Result, int width, int pitch, int height)
{
__shared__ float buffer[(LOWPASS_W + 2*LOWPASS_R)*LOWPASS_H];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*LOWPASS_W + tx;
const int yp = blockIdx.y*LOWPASS_H + ty;
float *kernel = d_LowPassKernel;
float *data = d_Image + max(min(xp - 4, width-1), 0);
float *buff = buffer + ty*(LOWPASS_W + 2*LOWPASS_R);
int h = height-1;
if (yp<height)
buff[tx] = kernel[4]*data[min(yp, h)*pitch] +
kernel[3]*(data[max(0, min(yp-1, h))*pitch] + data[min(yp+1, h)*pitch]) +
kernel[2]*(data[max(0, min(yp-2, h))*pitch] + data[min(yp+2, h)*pitch]) +
kernel[1]*(data[max(0, min(yp-3, h))*pitch] + data[min(yp+3, h)*pitch]) +
kernel[0]*(data[max(0, min(yp-4, h))*pitch] + data[min(yp+4, h)*pitch]);
__syncthreads();
if (tx<LOWPASS_W && xp<width && yp<height)
d_Result[yp*pitch + xp] = kernel[4]*buff[tx+4] +
kernel[3]*(buff[tx+3] + buff[tx+5]) + kernel[2]*(buff[tx+2] + buff[tx+6]) +
kernel[1]*(buff[tx+1] + buff[tx+7]) + kernel[0]*(buff[tx+0] + buff[tx+8]);
}
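// Usage sketch (not part of the original source): a plausible launch for LowPass,
// inferred from the indexing above. One output pixel per thread; blockDim.x must
// cover LOWPASS_W plus the 2*LOWPASS_R apron and blockDim.y must equal LOWPASS_H.
// The exact launch used by the library may differ.
static void ExampleLowPassLaunch(float *d_Image, float *d_Result,
                                 int width, int pitch, int height)
{
  dim3 blocks((width + LOWPASS_W - 1)/LOWPASS_W,
              (height + LOWPASS_H - 1)/LOWPASS_H);
  dim3 threads(LOWPASS_W + 2*LOWPASS_R, LOWPASS_H);
  LowPass<<<blocks, threads>>>(d_Image, d_Result, width, pitch, height);
}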
__global__ void LowPassBlockOld(float *d_Image, float *d_Result, int width, int pitch, int height)
{
__shared__ float xrows[16][32];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*LOWPASS_W + tx;
const int yp = blockIdx.y*LOWPASS_H + ty;
const int N = 16;
float *k = d_LowPassKernel;
int xl = max(min(xp - 4, width-1), 0);
for (int l=-8;l<=LOWPASS_H;l+=4) {
if (l<LOWPASS_H) {
int yl = max(min(yp + l + 4, height-1), 0);
float val = d_Image[yl*pitch + xl];
xrows[(l + 8 + ty)%N][tx] = k[4]*ShiftDown(val, 4) +
k[3]*(ShiftDown(val, 5) + ShiftDown(val, 3)) +
k[2]*(ShiftDown(val, 6) + ShiftDown(val, 2)) +
k[1]*(ShiftDown(val, 7) + ShiftDown(val, 1)) +
k[0]*(ShiftDown(val, 8) + val);
}
if (l>=4) {
int ys = yp + l - 4;
if (xp<width && ys<height && tx<LOWPASS_W)
d_Result[ys*pitch + xp] = k[4]*xrows[(l + 0 + ty)%N][tx] +
k[3]*(xrows[(l - 1 + ty)%N][tx] + xrows[(l + 1 + ty)%N][tx]) +
k[2]*(xrows[(l - 2 + ty)%N][tx] + xrows[(l + 2 + ty)%N][tx]) +
k[1]*(xrows[(l - 3 + ty)%N][tx] + xrows[(l + 3 + ty)%N][tx]) +
k[0]*(xrows[(l - 4 + ty)%N][tx] + xrows[(l + 4 + ty)%N][tx]);
}
if (l>=0)
__syncthreads();
}
}
__global__ void LowPassBlock(float *d_Image, float *d_Result, int width, int pitch, int height)
{
__shared__ float xrows[16][32];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*LOWPASS_W + tx;
const int yp = blockIdx.y*LOWPASS_H + ty;
const int N = 16;
float *k = d_LowPassKernel;
int xl = max(min(xp - 4, width-1), 0);
#pragma unroll
for (int l=-8;l<4;l+=4) {
int ly = l + ty;
int yl = max(min(yp + l + 4, height-1), 0);
float val = d_Image[yl*pitch + xl];
val = k[4]*ShiftDown(val, 4) +
k[3]*(ShiftDown(val, 5) + ShiftDown(val, 3)) +
k[2]*(ShiftDown(val, 6) + ShiftDown(val, 2)) +
k[1]*(ShiftDown(val, 7) + ShiftDown(val, 1)) +
k[0]*(ShiftDown(val, 8) + val);
xrows[ly + 8][tx] = val;
}
__syncthreads();
#pragma unroll
for (int l=4;l<LOWPASS_H;l+=4) {
int ly = l + ty;
int yl = min(yp + l + 4, height-1);
float val = d_Image[yl*pitch + xl];
val = k[4]*ShiftDown(val, 4) +
k[3]*(ShiftDown(val, 5) + ShiftDown(val, 3)) +
k[2]*(ShiftDown(val, 6) + ShiftDown(val, 2)) +
k[1]*(ShiftDown(val, 7) + ShiftDown(val, 1)) +
k[0]*(ShiftDown(val, 8) + val);
xrows[(ly + 8)%N][tx] = val;
int ys = yp + l - 4;
if (xp<width && ys<height && tx<LOWPASS_W)
d_Result[ys*pitch + xp] = k[4]*xrows[(ly + 0)%N][tx] +
k[3]*(xrows[(ly - 1)%N][tx] + xrows[(ly + 1)%N][tx]) +
k[2]*(xrows[(ly - 2)%N][tx] + xrows[(ly + 2)%N][tx]) +
k[1]*(xrows[(ly - 3)%N][tx] + xrows[(ly + 3)%N][tx]) +
k[0]*(xrows[(ly - 4)%N][tx] + xrows[(ly + 4)%N][tx]);
__syncthreads();
}
int ly = LOWPASS_H + ty;
int ys = yp + LOWPASS_H - 4;
if (xp<width && ys<height && tx<LOWPASS_W)
d_Result[ys*pitch + xp] = k[4]*xrows[(ly + 0)%N][tx] +
k[3]*(xrows[(ly - 1)%N][tx] + xrows[(ly + 1)%N][tx]) +
k[2]*(xrows[(ly - 2)%N][tx] + xrows[(ly + 2)%N][tx]) +
k[1]*(xrows[(ly - 3)%N][tx] + xrows[(ly + 3)%N][tx]) +
k[0]*(xrows[(ly - 4)%N][tx] + xrows[(ly + 4)%N][tx]);
}
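// Usage sketch (not part of the original source): a plausible launch for
// LowPassBlock, inferred from the indexing above. The 32x4 block shape matches
// the 32-wide xrows[] buffer and the row stride of 4 used in the loops; each
// block filters a LOWPASS_W x LOWPASS_H tile. The exact launch used by the
// library may differ.
static void ExampleLowPassBlockLaunch(float *d_Image, float *d_Result,
                                      int width, int pitch, int height)
{
  dim3 blocks((width + LOWPASS_W - 1)/LOWPASS_W,
              (height + LOWPASS_H - 1)/LOWPASS_H);
  dim3 threads(32, 4);
  LowPassBlock<<<blocks, threads>>>(d_Image, d_Result, width, pitch, height);
}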
#include <cstdio>
#include <vector>
#include <bitset>
#include <utility_kernels_pose.h>
#include <multiple_rigid_pose_kernels.h>
namespace pose {
// OpenGL mapped input textures
texture<float, cudaTextureType2D, cudaReadModeElementType> d_Zbuffer_texture;
texture<float, cudaTextureType2D, cudaReadModeElementType>
d_normalXArray_texture;
texture<float, cudaTextureType2D, cudaReadModeElementType>
d_normalYArray_texture;
texture<float, cudaTextureType2D, cudaReadModeElementType>
d_normalZArray_texture;
texture<float, cudaTextureType2D, cudaReadModeElementType>
d_segmentINDArray_texture;
// Residual flow after compensating for d_T and d_R_mat applied to
// d_init_Zbuffer
__global__ void
compute_residual_flow_GPU(float *d_res_flowx, float *d_res_flowy,
const float *d_flowx, const float *d_flowy,
const float *d_T, const float *d_R_mat,
const float *d_init_Z, int n_cols, int n_rows,
float ox, float oy, float fx, float fy) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
unsigned int ind = x + y * n_cols;
float ux = d_flowx[ind];
// determine gl coord
float x_gl = (float)x + 0.5f;
float y_gl = (float)y + 0.5f;
int segment = (int)rintf(tex2D(d_segmentINDArray_texture, x_gl, y_gl));
if (isfinite(ux) & (segment > 0)) { // check validity
// move T and R pointers to correct position
d_T += 3 * segment - 3;
d_R_mat += 9 * segment - 9;
float uy = d_flowy[ind];
float Z = d_init_Z[ind];
float xt = __fdividef((x - ox), fx);
float yt = __fdividef((y - oy), fy);
// reconstruct initial model point
float X = xt * Z;
float Y = yt * Z;
// rigid transform model point
float X2 = d_R_mat[0] * X + d_R_mat[1] * Y + d_R_mat[2] * Z + d_T[0];
float Y2 = d_R_mat[3] * X + d_R_mat[4] * Y + d_R_mat[5] * Z + d_T[1];
float Z2 = d_R_mat[6] * X + d_R_mat[7] * Y + d_R_mat[8] * Z + d_T[2];
// explained flow
float ux_e = fx * X2 / Z2 - fx * xt;
float uy_e = fy * Y2 / Z2 - fy * yt;
// save residual flow
d_res_flowx[ind] = ux - ux_e;
d_res_flowy[ind] = uy - uy_e;
} else {
d_res_flowx[ind] = nanf("");
d_res_flowy[ind] = nanf("");
}
}
}
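// Usage sketch (not part of the original source): a possible host-side launch for
// compute_residual_flow_GPU. One thread per pixel; the 16x16 block shape is an
// assumption, and d_segmentINDArray_texture must already be bound to the segment
// index map before the call.
static void example_launch_compute_residual_flow(
    float *d_res_flowx, float *d_res_flowy, const float *d_flowx,
    const float *d_flowy, const float *d_T, const float *d_R_mat,
    const float *d_init_Z, int n_cols, int n_rows, float ox, float oy, float fx,
    float fy) {
  dim3 threads(16, 16);
  dim3 blocks((n_cols + threads.x - 1) / threads.x,
              (n_rows + threads.y - 1) / threads.y);
  compute_residual_flow_GPU<<<blocks, threads>>>(
      d_res_flowx, d_res_flowy, d_flowx, d_flowy, d_T, d_R_mat, d_init_Z, n_cols,
      n_rows, ox, oy, fx, fy);
}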
// Marks the valid flow locations with (segment index - 1) and the invalid ones
// with n_objects, and marks the plain Zbuffer locations the same way
// --> invalids will be sorted after all valids
__global__ void mark_valid_flow_Zbuffer_and_Zbuffer_zero_based_GPU(
unsigned int *d_valid_flow_Zbuffer, unsigned int *d_valid_Zbuffer,
const float *d_flowx, int n_cols, int n_rows, int n_objects) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
// determine linear index
unsigned int ind = x + y * n_cols;
// determine gl coord
float xt = (float)x + 0.5f;
float yt = (float)y + 0.5f;
// fetch segment index
int segmentIND = (int)rintf(tex2D(d_segmentINDArray_texture, xt, yt));
// change to zero-based index, with invalids = n_objects
segmentIND = (segmentIND == 0) ? n_objects : (segmentIND - 1);
d_valid_Zbuffer[ind] = segmentIND;
d_valid_flow_Zbuffer[ind] =
(isfinite(d_flowx[ind])) ? segmentIND : n_objects;
}
}
// Marks the valid flow and disparity locations that also belong to a specific
// segment with that segment's index - 1. The returned segment indices are
// zero-based and invalids obtain the label n_objects; as a result, invalids
// will be sorted after all valids. Segments whose (zero-based) bit in
// segments_to_update is 0 are considered invalid.
__global__ void mark_with_zero_based_segmentIND_GPU(
unsigned int *d_valid_flow_Zbuffer, unsigned int *d_valid_disparity_Zbuffer,
const float *d_flowx, const float *d_ar_flowx, const char *d_disparity,
int n_cols, int n_rows, int n_objects, int d_disparity_pitch,
bool mark_flow, bool mark_ar_flow, bool mark_disparity,
int segments_to_update) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
// determine linear index
unsigned int ind = x + y * n_cols;
// determine gl coord
float xt = (float)x + 0.5f;
float yt = (float)y + 0.5f;
// fetch segment index
int segmentIND = (int)rintf(tex2D(d_segmentINDArray_texture, xt, yt));
// change to zero-based index, with invalids = n_objects
// segmentIND = ( (segmentIND==0) || !(segments_to_update & (1 <<
// (segmentIND-1))) ) ? n_objects : (segmentIND-1);
segmentIND = (segmentIND == 0) ? n_objects : (segmentIND - 1);
// mark flow
d_valid_flow_Zbuffer[ind] =
(isfinite(d_flowx[ind]) && mark_flow) ? segmentIND : n_objects;
// ar flow is marked at index + n_cols*n_rows
d_valid_flow_Zbuffer[ind + n_cols * n_rows] =
(isfinite(d_ar_flowx[ind]) && mark_ar_flow) ? segmentIND : n_objects;
// fetch disparity
float *disparity = (float *)(d_disparity + y * d_disparity_pitch) + x;
d_valid_disparity_Zbuffer[ind] =
(isfinite(disparity[0]) && mark_disparity) ? segmentIND : n_objects;
}
}
// Marks the valid flow and disparity locations that also belong to a specific
// segment with that segment's index - 1 + index_offset. The returned segment
// indices are zero-based and invalids obtain the label 'invalid_index'; as a
// result, invalids will be sorted after all valids.
__global__ void mark_flow_disparity_GPU(
unsigned int *d_valid_flow_Zbuffer, unsigned int *d_valid_disparity_Zbuffer,
const float *d_flowx, const float *d_ar_flowx, const char *d_disparity,
int n_cols, int n_rows, int invalid_index, int index_offset,
int d_disparity_pitch, bool mark_flow, bool mark_ar_flow,
bool mark_disparity) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
// determine linear index
unsigned int ind = x + y * n_cols;
// determine gl coord
float xt = (float)x + 0.5f;
float yt = (float)y + 0.5f;
// fetch segment index
int segmentIND = (int)rintf(tex2D(d_segmentINDArray_texture, xt, yt));
// change to zero-based index (with offset), with invalids = invalid_index
segmentIND =
(segmentIND == 0) ? invalid_index : (segmentIND - 1 + index_offset);
// mark flow
d_valid_flow_Zbuffer[ind] =
(isfinite(d_flowx[ind]) && mark_flow) ? segmentIND : invalid_index;
// ar flow is marked at index + n_cols*n_rows
d_valid_flow_Zbuffer[ind + n_cols * n_rows] =
(isfinite(d_ar_flowx[ind]) && mark_ar_flow) ? segmentIND
: invalid_index;
// fetch disparity
float *disparity = (float *)(d_disparity + y * d_disparity_pitch) + x;
d_valid_disparity_Zbuffer[ind] =
(isfinite(disparity[0]) && mark_disparity) ? segmentIND : invalid_index;
}
}
// Marks the valid flow locations that also belong to a specific segment with
// that segment's index - 1 + index_offset. The returned segment indices are
// zero-based and invalids obtain the label 'invalid_index'; as a result,
// invalids will be sorted after all valids.
__global__ void mark_flow_GPU(unsigned int *d_valid_flow_Zbuffer,
const float *d_flowx, const float *d_ar_flowx,
int n_cols, int n_rows, int invalid_index,
int index_offset, bool mark_flow,
bool mark_ar_flow) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) // are we in the image?
{
// determine linear index
unsigned int ind = x + y * n_cols;
// determine gl coord
float xt = (float)x + 0.5f;
float yt = (float)y + 0.5f;
// fetch segment index
int segmentIND = (int)rintf(tex2D(d_segmentINDArray_texture, xt, yt));
// change to zero-based index (with offset), with invalids = invalid_index
segmentIND =
(segmentIND == 0) ? invalid_index : (segmentIND - 1 + index_offset);
// mark flow
d_valid_flow_Zbuffer[ind] =
(isfinite(d_flowx[ind]) && mark_flow) ? segmentIND : invalid_index;
// ar flow is marked at index + n_cols*n_rows
d_valid_flow_Zbuffer[ind + n_cols * n_rows] =
(isfinite(d_ar_flowx[ind]) && mark_ar_flow) ? segmentIND
: invalid_index;
}
}
// Regularly subsample indices and labels
__global__ void subsample_ind_and_labels_GPU(int *d_ind_sub, const int *d_ind,
unsigned int *d_label_sub,
const unsigned int *d_label,
int n_out, float inv_sub_factor) {
unsigned int ind_out = blockIdx.x * blockDim.x + threadIdx.x;
if (ind_out < n_out) {
int ind_in = (int)floorf((float)(ind_out) * inv_sub_factor);
d_ind_sub[ind_out] = d_ind[ind_in];
d_label_sub[ind_out] = d_label[ind_in];
}
}
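// Usage sketch (not part of the original source): a possible host-side launch for
// subsample_ind_and_labels_GPU. inv_sub_factor is assumed to be n_in / n_out so
// that the computed input index stays below n_in; the block size of 256 is an
// arbitrary choice.
static void example_launch_subsample(int *d_ind_sub, const int *d_ind,
                                     unsigned int *d_label_sub,
                                     const unsigned int *d_label, int n_in,
                                     int n_out) {
  float inv_sub_factor = (float)n_in / (float)n_out;
  int threads = 256;
  int blocks = (n_out + threads - 1) / threads;
  subsample_ind_and_labels_GPU<<<blocks, threads>>>(
      d_ind_sub, d_ind, d_label_sub, d_label, n_out, inv_sub_factor);
}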
// Gather the valid flow and Zbuffer + transform Zbuffer
// ind_flow_offset used in multi-camera case
__global__ void gather_valid_flow_Zbuffer_GPU(
float2 *d_flow_compact, float *d_Zbuffer_compact, const float *d_flowx,
const float *d_flowy, const float *d_ar_flowx, const float *d_ar_flowy,
const int *d_ind_flow_Zbuffer, int n_valid_flow_Zbuffer, int n_cols,
int n_rows, float Z_conv1, float Z_conv2, int ind_flow_offset = 0) {
unsigned int ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind < n_valid_flow_Zbuffer) {
int ind_flow = d_ind_flow_Zbuffer[ind] - ind_flow_offset;
// fetch and write flow
if (ind_flow < (n_rows * n_cols)) // image flow
d_flow_compact[ind] = make_float2(d_flowx[ind_flow], d_flowy[ind_flow]);
else { // ar flow
ind_flow -= n_rows * n_cols;
d_flow_compact[ind] =
make_float2(d_ar_flowx[ind_flow], d_ar_flowy[ind_flow]);
}
// extract row (y) and column (x) from linear index
int y = floorf(__fdividef((float)ind_flow, n_cols));
int x = ind_flow - y * n_cols;
// determine gl index
float xt = (float)x + 0.5f;
float yt = (float)y + 0.5f;
// fetch Zbuffer
float Zbuffer = tex2D(d_Zbuffer_texture, xt, yt);
// transform Zbuffer
Zbuffer = __fdividef(Z_conv1, Zbuffer + Z_conv2);
// write Zbuffer
d_Zbuffer_compact[ind] = Zbuffer;
}
}
// Gather the valid disparity, Zbuffer and normals + transform Zbuffer and
// normals
__global__ void gather_valid_disparity_Zbuffer_GPU(
float *d_disparity_compact, float4 *d_Zbuffer_normals_compact,
const char *d_disparity, const int *d_ind_disparity_Zbuffer,
int n_valid_disparity_Zbuffer, int n_cols, int n_rows, float Z_conv1,
float Z_conv2, int disparity_pitch, int ind_disp_offset = 0) {
unsigned int ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind < n_valid_disparity_Zbuffer) {
int ind_disp = d_ind_disparity_Zbuffer[ind] - ind_disp_offset;
// extract row (y) and column (x) from linear index
int y = floorf(__fdividef((float)ind_disp, n_cols));
int x = ind_disp - y * n_cols;
// fetch and write disparity
d_disparity_compact[ind] =
*((float *)(d_disparity + y * disparity_pitch) + x);
// determine gl index
float xt = (float)x + 0.5f;
float yt = (float)y + 0.5f;
// fetch Zbuffer and normal
float Zbuffer = tex2D(d_Zbuffer_texture, xt, yt);
float normalx = tex2D(d_normalXArray_texture, xt, yt);
float normaly = tex2D(d_normalYArray_texture, xt, yt);
float normalz = tex2D(d_normalZArray_texture, xt, yt);
// transform Zbuffer
Zbuffer = __fdividef(Z_conv1, Zbuffer + Z_conv2);
// transform normal
normalz = -normalz;
// write Zbuffer and normal
d_Zbuffer_normals_compact[ind] =
make_float4(Zbuffer, normalx, normaly, normalz);
}
}
// Unweighted normal equations for flow
__global__ void normal_eqs_flow_GPU(float *d_CO, const float2 *d_flow_compact,
const float *d_Zbuffer_flow_compact,
const int *d_ind_flow_Zbuffer, float fx,
float fy, float ox, float oy, int n_rows,
int n_cols, const int *d_n_values_flow,
const int *d_start_ind_flow) {
int n_val_accum = gridDim.x * blockDim.x; // _MAX_N_VAL_ACCUM may not be
// multiple of blocksize
int n_flow = d_n_values_flow[blockIdx.y];
int n_accum = (int)ceilf((float)n_flow / (float)n_val_accum);
int start_ind = d_start_ind_flow[blockIdx.y];
// initialize accumulators
float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f,
A6 = 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f,
A12 = 0.0f, A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f, A17 = 0.0f,
A18 = 0.0f, A19 = 0.0f, A20 = 0.0f, A21 = 0.0f, A22 = 0.0f;
for (int in_ind = blockDim.x * blockIdx.x * n_accum + threadIdx.x;
in_ind < blockDim.x * (blockIdx.x + 1) * n_accum; in_ind += blockDim.x) {
if (in_ind < n_flow) { // is this a valid sample?
// fetch flow and Zbuffer from global memory
float2 u = d_flow_compact[in_ind + start_ind];
float disp = __fdividef(1.0f, d_Zbuffer_flow_compact[in_ind + start_ind]);
// compute coordinates
int pixel_ind = d_ind_flow_Zbuffer[in_ind + start_ind];
bool is_ar_flow = (pixel_ind >= (n_rows * n_cols));
pixel_ind -= (int)is_ar_flow * n_rows * n_cols;
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
x = x - ox;
y = y - oy;
/************************/
/* evaluate constraints */
/************************/
// unique values A-matrix
A0 += (disp * disp * fx * fx);
A1 += (-disp * disp * x * fx);
A2 += (-disp * x * y);
A3 += (disp * fx * fx + disp * x * x);
A4 += (-disp * y * fx);
A5 += (-disp * disp * y * fy);
A6 += (-disp * fy * fy - disp * y * y); //!!!!
A7 += (disp * x * fy);
A8 += (disp * disp * x * x + disp * disp * y * y);
A9 += (disp * x * x * y / fx + disp * y * fy + disp * y * y * y / fy);
A10 += (-disp * x * fx - disp * x * x * x / fx - disp * x * y * y / fy);
A11 += (x * x * y * y / (fx * fx) + fy * fy + 2.0f * y * y +
y * y * y * y / (fy * fy));
A12 += (-2.0f * x * y - x * x * x * y / (fx * fx) -
x * y * y * y / (fy * fy));
A13 += (x * y * y / fx - x * fy - x * y * y / fy);
A14 += (fx * fx + 2.0f * x * x + x * x * x * x / (fx * fx) +
x * x * y * y / (fy * fy));
A15 += (-y * fx - x * x * y / fx + x * x * y / fy);
A16 += (x * x + y * y);
// B-vector
A17 += (disp * u.x * fx);
A18 += (disp * u.y * fy);
A19 += (-disp * x * u.x - disp * y * u.y);
A20 += (-x * y * u.x / fx - u.y * fy - u.y * y * y / fy);
A21 += (u.x * fx + x * x * u.x / fx + x * y * u.y / fy);
A22 += (-y * u.x + x * u.y);
}
}
/**************************/
/* write out accumulators */
/**************************/
int out_ind =
23 * n_val_accum * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_CO[out_ind] = A0;
d_CO[out_ind + n_val_accum] = A1;
d_CO[out_ind + 2 * n_val_accum] = A2;
d_CO[out_ind + 3 * n_val_accum] = A3;
d_CO[out_ind + 4 * n_val_accum] = A4;
d_CO[out_ind + 5 * n_val_accum] = A5;
d_CO[out_ind + 6 * n_val_accum] = A6;
d_CO[out_ind + 7 * n_val_accum] = A7;
d_CO[out_ind + 8 * n_val_accum] = A8;
d_CO[out_ind + 9 * n_val_accum] = A9;
d_CO[out_ind + 10 * n_val_accum] = A10;
d_CO[out_ind + 11 * n_val_accum] = A11;
d_CO[out_ind + 12 * n_val_accum] = A12;
d_CO[out_ind + 13 * n_val_accum] = A13;
d_CO[out_ind + 14 * n_val_accum] = A14;
d_CO[out_ind + 15 * n_val_accum] = A15;
d_CO[out_ind + 16 * n_val_accum] = A16;
d_CO[out_ind + 17 * n_val_accum] = A17;
d_CO[out_ind + 18 * n_val_accum] = A18;
d_CO[out_ind + 19 * n_val_accum] = A19;
d_CO[out_ind + 20 * n_val_accum] = A20;
d_CO[out_ind + 21 * n_val_accum] = A21;
d_CO[out_ind + 22 * n_val_accum] = A22;
}
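// Usage sketch (not part of the original source): a possible launch for
// normal_eqs_flow_GPU. blockIdx.y selects the object, 64 threads per block match
// the stride assumed by the reduce_normal_eqs_64_* kernels below, and
// n_accum_blocks (the number of accumulation blocks per object) is an assumption
// of this sketch; d_CO must hold 23 * 64 * n_accum_blocks floats per object.
static void example_launch_normal_eqs_flow(
    float *d_CO, const float2 *d_flow_compact,
    const float *d_Zbuffer_flow_compact, const int *d_ind_flow_Zbuffer, float fx,
    float fy, float ox, float oy, int n_rows, int n_cols,
    const int *d_n_values_flow, const int *d_start_ind_flow, int n_objects,
    int n_accum_blocks) {
  dim3 threads(64);
  dim3 blocks(n_accum_blocks, n_objects);
  normal_eqs_flow_GPU<<<blocks, threads>>>(
      d_CO, d_flow_compact, d_Zbuffer_flow_compact, d_ind_flow_Zbuffer, fx, fy,
      ox, oy, n_rows, n_cols, d_n_values_flow, d_start_ind_flow);
}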
// Unweighted normal equations for flow in the multicamera scenario, with
// per-camera calibration parameters and pixel offsets in d_ind_flow_Zbuffer
__global__ void normal_eqs_flow_multicam_GPU(
float *d_CO, float2 *d_flow_compact, float *d_Zbuffer_flow_compact,
int *d_ind_flow_Zbuffer, const float *d_focal_length,
const float *d_nodal_point_x, const float *d_nodal_point_y,
const int *d_n_rows, const int *d_n_cols, const int *d_n_values_flow,
const int *d_start_ind_flow, const int *d_pixel_ind_offset) {
int n_val_accum = gridDim.x * blockDim.x; // _MAX_N_VAL_ACCUM may not be
// multiple of blocksize
int n_flow = d_n_values_flow[blockIdx.y];
int n_accum = (int)ceilf((float)n_flow / (float)n_val_accum);
int start_ind = d_start_ind_flow[blockIdx.y];
float f = d_focal_length[blockIdx.y];
float ox = d_nodal_point_x[blockIdx.y];
float oy = d_nodal_point_y[blockIdx.y];
int n_rows = d_n_rows[blockIdx.y];
int n_cols = d_n_cols[blockIdx.y];
int pixel_ind_offset = d_pixel_ind_offset[blockIdx.y];
// initialize accumulators
float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f,
A6 = 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f,
A12 = 0.0f, A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f, A17 = 0.0f,
A18 = 0.0f, A19 = 0.0f, A20 = 0.0f, A21 = 0.0f, A22 = 0.0f;
for (int in_ind = blockDim.x * blockIdx.x * n_accum + threadIdx.x;
in_ind < blockDim.x * (blockIdx.x + 1) * n_accum; in_ind += blockDim.x) {
if (in_ind < n_flow) { // is this a valid sample?
// fetch flow and Zbuffer from global memory
float2 u = d_flow_compact[in_ind + start_ind];
float disp = __fdividef(1.0f, d_Zbuffer_flow_compact[in_ind + start_ind]);
// compute coordinates
int pixel_ind = d_ind_flow_Zbuffer[in_ind + start_ind] - pixel_ind_offset;
bool is_ar_flow = (pixel_ind >= (n_rows * n_cols));
pixel_ind -= (int)is_ar_flow * n_rows * n_cols;
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
x = x - ox;
y = y - oy;
// flip y axis
y = -y;
u.y = -u.y;
/************************/
/* evaluate constraints */
/************************/
// unique values A-matrix
A0 += (disp * disp * f * f);
A1 += (-disp * disp * x * f);
A2 += (-disp * x * y);
A3 += (disp * f * f + disp * x * x);
A4 += (-disp * y * f);
A5 += (-disp * disp * y * f);
A6 += (-disp * f * f - disp * y * y);
A7 += (disp * x * f);
A8 += (disp * disp * x * x + disp * disp * y * y);
A9 += (disp * x * x * y / f + disp * y * f + disp * y * y * y / f);
A10 += (-disp * x * f - disp * x * x * x / f - disp * x * y * y / f);
A11 += (x * x * y * y / (f * f) + f * f + 2.0f * y * y +
y * y * y * y / (f * f));
A12 +=
(-2.0f * x * y - x * x * x * y / (f * f) - x * y * y * y / (f * f));
A13 += (-x * f);
A14 += (f * f + 2.0f * x * x + x * x * x * x / (f * f) +
x * x * y * y / (f * f));
A15 += (-y * f);
A16 += (x * x + y * y);
// B-vector
A17 += (disp * u.x * f);
A18 += (disp * u.y * f);
A19 += (-disp * x * u.x - disp * y * u.y);
A20 += (-x * y * u.x / f - u.y * f - u.y * y * y / f);
A21 += (u.x * f + x * x * u.x / f + x * y * u.y / f);
A22 += (-y * u.x + x * u.y);
}
}
/**************************/
/* write out accumulators */
/**************************/
int out_ind =
23 * n_val_accum * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_CO[out_ind] = A0;
d_CO[out_ind + n_val_accum] = A1;
d_CO[out_ind + 2 * n_val_accum] = A2;
d_CO[out_ind + 3 * n_val_accum] = A3;
d_CO[out_ind + 4 * n_val_accum] = A4;
d_CO[out_ind + 5 * n_val_accum] = A5;
d_CO[out_ind + 6 * n_val_accum] = A6;
d_CO[out_ind + 7 * n_val_accum] = A7;
d_CO[out_ind + 8 * n_val_accum] = A8;
d_CO[out_ind + 9 * n_val_accum] = A9;
d_CO[out_ind + 10 * n_val_accum] = A10;
d_CO[out_ind + 11 * n_val_accum] = A11;
d_CO[out_ind + 12 * n_val_accum] = A12;
d_CO[out_ind + 13 * n_val_accum] = A13;
d_CO[out_ind + 14 * n_val_accum] = A14;
d_CO[out_ind + 15 * n_val_accum] = A15;
d_CO[out_ind + 16 * n_val_accum] = A16;
d_CO[out_ind + 17 * n_val_accum] = A17;
d_CO[out_ind + 18 * n_val_accum] = A18;
d_CO[out_ind + 19 * n_val_accum] = A19;
d_CO[out_ind + 20 * n_val_accum] = A20;
d_CO[out_ind + 21 * n_val_accum] = A21;
d_CO[out_ind + 22 * n_val_accum] = A22;
}
// Unweighted normal equations for disparity
__global__ void
normal_eqs_disparity_GPU(float *d_CD, const float *d_disparity_compact,
const float4 *d_Zbuffer_normals_compact,
const int *d_ind_disparity_Zbuffer, float fx, float fy,
float ox, float oy, float b, int n_cols,
const int *d_n_values_disparity,
const int *d_start_ind_disparity, float w_disp) {
int n_val_accum = gridDim.x * blockDim.x; // _MAX_N_VAL_ACCUM may not be
// multiple of blocksize
int n_disparity = d_n_values_disparity[blockIdx.y];
int n_accum = (int)ceilf((float)n_disparity / (float)n_val_accum);
int start_ind = d_start_ind_disparity[blockIdx.y];
// initialize accumulators
float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f,
A6 = 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f,
A12 = 0.0f, A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f, A17 = 0.0f,
A18 = 0.0f, A19 = 0.0f, A20 = 0.0f, A21 = 0.0f, A22 = 0.0f, A23 = 0.0f,
A24 = 0.0f, A25 = 0.0f, A26 = 0.0f;
for (int in_ind = blockDim.x * blockIdx.x * n_accum + threadIdx.x;
in_ind < blockDim.x * (blockIdx.x + 1) * n_accum; in_ind += blockDim.x) {
if (in_ind < n_disparity) { // is this a valid sample?
// fetch disparity, Zbuffer and normal from global memory
float disp = d_disparity_compact[in_ind + start_ind];
float4 tmp = d_Zbuffer_normals_compact[in_ind + start_ind];
float Zbuffer = tmp.x;
float nx = tmp.y;
float ny = tmp.z;
float nz = tmp.w;
// compute coordinates
int pixel_ind = d_ind_disparity_Zbuffer[in_ind + start_ind];
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
x = __fdividef((x - ox), fx);
y = __fdividef((y - oy), fy);
// reconstruct 3D point from disparity
float Zd = -(fx * b) / disp; // arbitrary conversion for now using fx
float Xd = x * Zd;
float Yd = y * Zd;
// reconstruct 3D point from model
float Zm = Zbuffer;
float Xm = x * Zm;
float Ym = y * Zm;
// weight the constraint according to (fx*b)/(Zm*Zm) to convert
// from distance- (mm) to image-units (pixel)
float w2 = fx * b / (Zm * Zm);
w2 *= w2;
/************************/
/* evaluate constraints */
/************************/
// unique values A-matrix
A0 += w2 * (nx * nx);
A1 += w2 * (nx * ny);
A2 += w2 * (nx * nz);
A3 += w2 * (Ym * nx * nz - Zm * nx * ny);
A4 += w2 * (Zm * (nx * nx) - Xm * nx * nz);
A5 += w2 * (-Ym * (nx * nx) + Xm * nx * ny);
A6 += w2 * (ny * ny);
A7 += w2 * (ny * nz);
A8 += w2 * (-Zm * (ny * ny) + Ym * ny * nz);
A9 += w2 * (-Xm * ny * nz + Zm * nx * ny);
A10 += w2 * (Xm * (ny * ny) - Ym * nx * ny);
A11 += w2 * (nz * nz);
A12 += w2 * (Ym * (nz * nz) - Zm * ny * nz);
A13 += w2 * (-Xm * (nz * nz) + Zm * nx * nz);
A14 += w2 * (Xm * ny * nz - Ym * nx * nz);
A15 += w2 * ((Ym * Ym) * (nz * nz) + (Zm * Zm) * (ny * ny) -
Ym * Zm * ny * nz * 2.0f);
A16 += w2 * (-Xm * Ym * (nz * nz) - (Zm * Zm) * nx * ny +
Xm * Zm * ny * nz + Ym * Zm * nx * nz);
A17 += w2 * (-Xm * Zm * (ny * ny) - (Ym * Ym) * nx * nz +
Xm * Ym * ny * nz + Ym * Zm * nx * ny);
A18 += w2 * ((Xm * Xm) * (nz * nz) + (Zm * Zm) * (nx * nx) -
Xm * Zm * nx * nz * 2.0f);
A19 += w2 * (-Ym * Zm * (nx * nx) - (Xm * Xm) * ny * nz +
Xm * Ym * nx * nz + Xm * Zm * nx * ny);
A20 += w2 * ((Xm * Xm) * (ny * ny) + (Ym * Ym) * (nx * nx) -
Xm * Ym * nx * ny * 2.0f);
// B-vector
A21 += w2 * (Xd * (nx * nx) - Xm * (nx * nx) + Yd * nx * ny -
Ym * nx * ny + Zd * nx * nz - Zm * nx * nz);
A22 += w2 * (Yd * (ny * ny) - Ym * (ny * ny) + Xd * nx * ny -
Xm * nx * ny + Zd * ny * nz - Zm * ny * nz);
A23 += w2 * (Zd * (nz * nz) - Zm * (nz * nz) + Xd * nx * nz -
Xm * nx * nz + Yd * ny * nz - Ym * ny * nz);
A24 += w2 *
(-Yd * Zm * (ny * ny) + Ym * Zd * (nz * nz) + Ym * Zm * (ny * ny) -
Ym * Zm * (nz * nz) - (Ym * Ym) * ny * nz + (Zm * Zm) * ny * nz +
Xd * Ym * nx * nz - Xm * Ym * nx * nz - Xd * Zm * nx * ny +
Yd * Ym * ny * nz + Xm * Zm * nx * ny - Zd * Zm * ny * nz);
A25 += w2 *
(Xd * Zm * (nx * nx) - Xm * Zd * (nz * nz) - Xm * Zm * (nx * nx) +
Xm * Zm * (nz * nz) + (Xm * Xm) * nx * nz - (Zm * Zm) * nx * nz -
Xd * Xm * nx * nz - Xm * Yd * ny * nz + Xm * Ym * ny * nz +
Yd * Zm * nx * ny - Ym * Zm * nx * ny + Zd * Zm * nx * nz);
A26 += w2 *
(-Xd * Ym * (nx * nx) + Xm * Yd * (ny * ny) + Xm * Ym * (nx * nx) -
Xm * Ym * (ny * ny) - (Xm * Xm) * nx * ny + (Ym * Ym) * nx * ny +
Xd * Xm * nx * ny - Yd * Ym * nx * ny + Xm * Zd * ny * nz -
Xm * Zm * ny * nz - Ym * Zd * nx * nz + Ym * Zm * nx * nz);
}
}
/**************************/
/* write out accumulators */
/**************************/
int out_ind =
27 * n_val_accum * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
w_disp *= w_disp; // weight relative to flow
d_CD[out_ind] = w_disp * A0;
d_CD[out_ind + n_val_accum] = w_disp * A1;
d_CD[out_ind + 2 * n_val_accum] = w_disp * A2;
d_CD[out_ind + 3 * n_val_accum] = w_disp * A3;
d_CD[out_ind + 4 * n_val_accum] = w_disp * A4;
d_CD[out_ind + 5 * n_val_accum] = w_disp * A5;
d_CD[out_ind + 6 * n_val_accum] = w_disp * A6;
d_CD[out_ind + 7 * n_val_accum] = w_disp * A7;
d_CD[out_ind + 8 * n_val_accum] = w_disp * A8;
d_CD[out_ind + 9 * n_val_accum] = w_disp * A9;
d_CD[out_ind + 10 * n_val_accum] = w_disp * A10;
d_CD[out_ind + 11 * n_val_accum] = w_disp * A11;
d_CD[out_ind + 12 * n_val_accum] = w_disp * A12;
d_CD[out_ind + 13 * n_val_accum] = w_disp * A13;
d_CD[out_ind + 14 * n_val_accum] = w_disp * A14;
d_CD[out_ind + 15 * n_val_accum] = w_disp * A15;
d_CD[out_ind + 16 * n_val_accum] = w_disp * A16;
d_CD[out_ind + 17 * n_val_accum] = w_disp * A17;
d_CD[out_ind + 18 * n_val_accum] = w_disp * A18;
d_CD[out_ind + 19 * n_val_accum] = w_disp * A19;
d_CD[out_ind + 20 * n_val_accum] = w_disp * A20;
d_CD[out_ind + 21 * n_val_accum] = w_disp * A21;
d_CD[out_ind + 22 * n_val_accum] = w_disp * A22;
d_CD[out_ind + 23 * n_val_accum] = w_disp * A23;
d_CD[out_ind + 24 * n_val_accum] = w_disp * A24;
d_CD[out_ind + 25 * n_val_accum] = w_disp * A25;
d_CD[out_ind + 26 * n_val_accum] = w_disp * A26;
}
// Unweighted normal equations for disparity in the multicamera scenario, with
// per-camera calibration parameters and pixel offsets in d_ind_disparity_Zbuffer
__global__ void normal_eqs_disparity_multicam_GPU(
float *d_CD, float *d_disparity_compact, float4 *d_Zbuffer_normals_compact,
int *d_ind_disparity_Zbuffer, const float *d_focal_length,
const float *d_nodal_point_x, const float *d_nodal_point_y,
const float *d_baseline, const int *d_n_cols,
const int *d_n_values_disparity, const int *d_start_ind_disparity,
const int *d_pixel_ind_offset) {
int n_val_accum = gridDim.x * blockDim.x; // _MAX_N_VAL_ACCUM may not be
// multiple of blocksize
int n_disparity = d_n_values_disparity[blockIdx.y];
int n_accum = (int)ceilf((float)n_disparity / (float)n_val_accum);
int start_ind = d_start_ind_disparity[blockIdx.y];
float f = d_focal_length[blockIdx.y];
float ox = d_nodal_point_x[blockIdx.y];
float oy = d_nodal_point_y[blockIdx.y];
float b = d_baseline[blockIdx.y];
int n_cols = d_n_cols[blockIdx.y];
int pixel_ind_offset = d_pixel_ind_offset[blockIdx.y];
// initialize accumulators
float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f,
A6 = 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f,
A12 = 0.0f, A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f, A17 = 0.0f,
A18 = 0.0f, A19 = 0.0f, A20 = 0.0f, A21 = 0.0f, A22 = 0.0f, A23 = 0.0f,
A24 = 0.0f, A25 = 0.0f, A26 = 0.0f;
for (int in_ind = blockDim.x * blockIdx.x * n_accum + threadIdx.x;
in_ind < blockDim.x * (blockIdx.x + 1) * n_accum; in_ind += blockDim.x) {
if (in_ind < n_disparity) { // is this a valid sample?
// fetch disparity, Zbuffer and normal from global memory
float disp = d_disparity_compact[in_ind + start_ind];
float4 tmp = d_Zbuffer_normals_compact[in_ind + start_ind];
float Zbuffer = tmp.x;
float nx = tmp.y;
float ny = tmp.z;
float nz = tmp.w;
// compute coordinates
int pixel_ind =
d_ind_disparity_Zbuffer[in_ind + start_ind] - pixel_ind_offset;
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
x = __fdividef((x - ox), f);
y = -__fdividef((y - oy), f);
// reconstruct 3D point from disparity
float Zd = -(f * b) / disp;
float Xd = x * Zd;
float Yd = y * Zd;
// reconstruct 3D point from model
float Zm = Zbuffer;
float Xm = x * Zm;
float Ym = y * Zm;
/************************/
/* evaluate constraints */
/************************/
// unique values A-matrix
A0 += nx * nx;
A1 += nx * ny;
A2 += nx * nz;
A3 += Ym * nx * nz - Zm * nx * ny;
A4 += Zm * (nx * nx) - Xm * nx * nz;
A5 += -Ym * (nx * nx) + Xm * nx * ny;
A6 += ny * ny;
A7 += ny * nz;
A8 += -Zm * (ny * ny) + Ym * ny * nz;
A9 += -Xm * ny * nz + Zm * nx * ny;
A10 += Xm * (ny * ny) - Ym * nx * ny;
A11 += nz * nz;
A12 += Ym * (nz * nz) - Zm * ny * nz;
A13 += -Xm * (nz * nz) + Zm * nx * nz;
A14 += Xm * ny * nz - Ym * nx * nz;
A15 += (Ym * Ym) * (nz * nz) + (Zm * Zm) * (ny * ny) -
Ym * Zm * ny * nz * 2.0f;
A16 += -Xm * Ym * (nz * nz) - (Zm * Zm) * nx * ny + Xm * Zm * ny * nz +
Ym * Zm * nx * nz;
A17 += -Xm * Zm * (ny * ny) - (Ym * Ym) * nx * nz + Xm * Ym * ny * nz +
Ym * Zm * nx * ny;
A18 += (Xm * Xm) * (nz * nz) + (Zm * Zm) * (nx * nx) -
Xm * Zm * nx * nz * 2.0f;
A19 += -Ym * Zm * (nx * nx) - (Xm * Xm) * ny * nz + Xm * Ym * nx * nz +
Xm * Zm * nx * ny;
A20 += (Xm * Xm) * (ny * ny) + (Ym * Ym) * (nx * nx) -
Xm * Ym * nx * ny * 2.0f;
// B-vector
A21 += Xd * (nx * nx) - Xm * (nx * nx) + Yd * nx * ny - Ym * nx * ny +
Zd * nx * nz - Zm * nx * nz;
A22 += Yd * (ny * ny) - Ym * (ny * ny) + Xd * nx * ny - Xm * nx * ny +
Zd * ny * nz - Zm * ny * nz;
A23 += Zd * (nz * nz) - Zm * (nz * nz) + Xd * nx * nz - Xm * nx * nz +
Yd * ny * nz - Ym * ny * nz;
A24 += -Yd * Zm * (ny * ny) + Ym * Zd * (nz * nz) + Ym * Zm * (ny * ny) -
Ym * Zm * (nz * nz) - (Ym * Ym) * ny * nz + (Zm * Zm) * ny * nz +
Xd * Ym * nx * nz - Xm * Ym * nx * nz - Xd * Zm * nx * ny +
Yd * Ym * ny * nz + Xm * Zm * nx * ny - Zd * Zm * ny * nz;
A25 += Xd * Zm * (nx * nx) - Xm * Zd * (nz * nz) - Xm * Zm * (nx * nx) +
Xm * Zm * (nz * nz) + (Xm * Xm) * nx * nz - (Zm * Zm) * nx * nz -
Xd * Xm * nx * nz - Xm * Yd * ny * nz + Xm * Ym * ny * nz +
Yd * Zm * nx * ny - Ym * Zm * nx * ny + Zd * Zm * nx * nz;
A26 += -Xd * Ym * (nx * nx) + Xm * Yd * (ny * ny) + Xm * Ym * (nx * nx) -
Xm * Ym * (ny * ny) - (Xm * Xm) * nx * ny + (Ym * Ym) * nx * ny +
Xd * Xm * nx * ny - Yd * Ym * nx * ny + Xm * Zd * ny * nz -
Xm * Zm * ny * nz - Ym * Zd * nx * nz + Ym * Zm * nx * nz;
}
}
/**************************/
/* write out accumulators */
/**************************/
int out_ind =
27 * n_val_accum * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_CD[out_ind] = A0;
d_CD[out_ind + n_val_accum] = A1;
d_CD[out_ind + 2 * n_val_accum] = A2;
d_CD[out_ind + 3 * n_val_accum] = A3;
d_CD[out_ind + 4 * n_val_accum] = A4;
d_CD[out_ind + 5 * n_val_accum] = A5;
d_CD[out_ind + 6 * n_val_accum] = A6;
d_CD[out_ind + 7 * n_val_accum] = A7;
d_CD[out_ind + 8 * n_val_accum] = A8;
d_CD[out_ind + 9 * n_val_accum] = A9;
d_CD[out_ind + 10 * n_val_accum] = A10;
d_CD[out_ind + 11 * n_val_accum] = A11;
d_CD[out_ind + 12 * n_val_accum] = A12;
d_CD[out_ind + 13 * n_val_accum] = A13;
d_CD[out_ind + 14 * n_val_accum] = A14;
d_CD[out_ind + 15 * n_val_accum] = A15;
d_CD[out_ind + 16 * n_val_accum] = A16;
d_CD[out_ind + 17 * n_val_accum] = A17;
d_CD[out_ind + 18 * n_val_accum] = A18;
d_CD[out_ind + 19 * n_val_accum] = A19;
d_CD[out_ind + 20 * n_val_accum] = A20;
d_CD[out_ind + 21 * n_val_accum] = A21;
d_CD[out_ind + 22 * n_val_accum] = A22;
d_CD[out_ind + 23 * n_val_accum] = A23;
d_CD[out_ind + 24 * n_val_accum] = A24;
d_CD[out_ind + 25 * n_val_accum] = A25;
d_CD[out_ind + 26 * n_val_accum] = A26;
}
// Final reduction of the normal equations
__global__ void reduce_normal_eqs_64_GPU(float *d_C_reduced, float *d_C,
int gridDim_x_normal_equations) {
int tid = threadIdx.x;
int bx = blockIdx.x;
// put data in shared memory
int ind = blockIdx.y * gridDim.x * gridDim_x_normal_equations * 64 +
bx * gridDim_x_normal_equations * 64 + tid;
__shared__ float DATA[64];
  // load and sum the gridDim_x_normal_equations partial accumulators
  float tmp = 0.0f;
  for (int i = 0; i < gridDim_x_normal_equations; i++)
    tmp += d_C[ind + i * 64];
  DATA[tid] = tmp;
  __syncthreads(); // ensure reading stage has finished
  // tree reduction; the barriers must be reached by all 64 threads of the block,
  // so the conditionals only guard the adds
  if (tid < 32)
    DATA[tid] += DATA[tid + 32];
  __syncthreads();
  if (tid < 16)
    DATA[tid] += DATA[tid + 16];
  __syncthreads();
  if (tid < 8)
    DATA[tid] += DATA[tid + 8];
  __syncthreads();
  if (tid < 4)
    DATA[tid] += DATA[tid + 4];
  __syncthreads();
  if (tid < 2)
    DATA[tid] += DATA[tid + 2];
  __syncthreads();
  if (tid < 1)
    DATA[tid] += DATA[tid + 1];
  __syncthreads();
// write results
if (tid == 0)
d_C_reduced[blockIdx.y * gridDim.x + bx] = DATA[0];
}
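// Usage sketch (not part of the original source): a possible launch for
// reduce_normal_eqs_64_GPU when reducing the flow accumulators. gridDim.x is the
// number of unique normal-equation entries (23 for flow, 27 for disparity),
// blockIdx.y the object, and gridDim_x_normal_equations must equal the number of
// accumulation blocks used when d_C was filled.
static void example_launch_reduce_flow_normal_eqs(float *d_C_reduced, float *d_C,
                                                  int n_objects,
                                                  int n_accum_blocks) {
  dim3 threads(64);
  dim3 blocks(23, n_objects);
  reduce_normal_eqs_64_GPU<<<blocks, threads>>>(d_C_reduced, d_C, n_accum_blocks);
}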
// Final reduction of the normal equations
// In this version each block processes multiple constraints (according to
// threadIdx.y)
__global__ void
reduce_normal_eqs_64_mult_constr_GPU(float *d_C_reduced, const float *d_C,
int gridDim_x_normal_equations,
int n_constraints) {
  // each threadIdx.y row of the block reduces one constraint; rows beyond
  // n_constraints only accumulate zeros and skip the final write, so every
  // thread of the block reaches every __syncthreads() below
  int constraint_ind = blockIdx.x * 4 + threadIdx.y;
  bool valid = (constraint_ind < n_constraints);
  int tid = 64 * threadIdx.y + threadIdx.x;
  __shared__ float DATA[64 * 4];
  // load and sum the gridDim_x_normal_equations partial accumulators
  float tmp = 0.0f;
  if (valid) {
    int ind = blockIdx.y * n_constraints * gridDim_x_normal_equations * 64 +
              constraint_ind * gridDim_x_normal_equations * 64 + threadIdx.x;
    for (int i = 0; i < gridDim_x_normal_equations; i++)
      tmp += d_C[ind + i * 64];
  }
  DATA[tid] = tmp;
  __syncthreads(); // ensure reading stage has finished
  // tree reduction within each 64-element row of DATA
  if (threadIdx.x < 32)
    DATA[tid] += DATA[tid + 32];
  __syncthreads();
  if (threadIdx.x < 16)
    DATA[tid] += DATA[tid + 16];
  __syncthreads();
  if (threadIdx.x < 8)
    DATA[tid] += DATA[tid + 8];
  __syncthreads();
  if (threadIdx.x < 4)
    DATA[tid] += DATA[tid + 4];
  __syncthreads();
  if (threadIdx.x < 2)
    DATA[tid] += DATA[tid + 2];
  __syncthreads();
  if (threadIdx.x < 1)
    DATA[tid] += DATA[tid + 1];
  __syncthreads();
  // write results
  if (valid && threadIdx.x == 0)
    d_C_reduced[blockIdx.y * n_constraints + constraint_ind] = DATA[tid];
}
// Auxiliary device functions to compute OLS absolute residual
__device__ static float flow_absolute_residual(float x, float y, float ux,
float uy, float d, float fx,
float fy, float T0, float T1,
float T2, float R0, float R1,
float R2) {
float rx = -ux + fx * R1 - y * R2 + ((x * x) * R1) / fx + d * fx * T0 -
d * x * T2 - (x * y * R0) / fx;
float ry = -uy - fy * R0 + x * R2 - d * y * T2 - ((y * y) * R0) / fy +
d * fy * T1 + (x * y * R1) / fy;
return sqrtf(rx * rx + ry * ry);
}
__device__ static float disp_absolute_residual(float Xd, float Yd, float Zd,
float Xm, float Ym, float Zm,
float nx, float ny, float nz,
float T0, float T1, float T2,
float R0, float R1, float R2,
float fx, float b) {
float r = -Xd * nx + Xm * nx - Yd * ny + Ym * ny - Zd * nz + Zm * nz +
nx * T0 + ny * T1 + nz * T2 + Xm * ny * R2 - Xm * nz * R1 -
Ym * nx * R2 + Ym * nz * R0 + Zm * nx * R1 - Zm * ny * R0;
// weight to convert distance units to pixels
r *= fx * b / (Zm * Zm);
return fabsf(r);
}
// Absolute residual for flow multi-camera case
//__global__ void flow_absolute_residual_multicam_GPU(float *d_abs_res, float2
//*d_flow_compact, float *d_Zbuffer_flow_compact, int *d_ind_flow_Zbuffer,
//unsigned int *d_valid_flow_Zbuffer, const float* d_focal_length, const float*
//d_nodal_point_x, const float* d_nodal_point_y, const int* d_n_rows, const int*
//d_n_cols, int n_valid_flow_Zbuffer, const int *d_res_offset_ind, const int
//*d_pixel_ind_offset, const int *d_segment_translation_table, const float
//*d_dTR)
//{
// int ind = blockDim.x*blockIdx.x + threadIdx.x;
// if (ind < n_valid_flow_Zbuffer) {
// // determine current segment
// int segment = d_segment_translation_table[d_valid_flow_Zbuffer[ind]];
// // get segment parameters
// float f = d_focal_length[segment];
// float ox = d_nodal_point_x[segment];
// float oy = d_nodal_point_y[segment];
// int n_rows = d_n_rows[segment];
// int n_cols = d_n_cols[segment];
// int pixel_ind_offset = d_pixel_ind_offset[segment];
// int res_offset = d_res_offset_ind[segment];
// // fetch flow and Zbuffer from global memory
// float2 u = d_flow_compact[ind];
// float disp = __fdividef(1.0f,d_Zbuffer_flow_compact[ind]);
// // compute coordinates
// int pixel_ind = d_ind_flow_Zbuffer[ind] - pixel_ind_offset;
// bool is_ar_flow = (pixel_ind>=(n_rows*n_cols));
// pixel_ind -= (int)is_ar_flow*n_rows*n_cols;
// float y = floorf(__fdividef( (float)pixel_ind , n_cols ));
// float x = (float)pixel_ind - y*n_cols;
// x = x - ox;
// y = y - oy;
// // flip y axis
// y = -y;
// u.y = -u.y;
// // compute absolute residual
// // here the weights will be introduced
// int ind_out = ind + res_offset;
// int s6 = segment*6;
// d_abs_res[ind_out] = flow_absolute_residual(x, y, u.x, u.y, disp, f,
// d_dTR[s6], d_dTR[s6+1], d_dTR[s6+2], d_dTR[s6+3], d_dTR[s6+4], d_dTR[s6+5]);
// }
//}
// Absolute residual for flow
__global__ void flow_absolute_residual_scalable_GPU(
float *d_abs_res, const float2 *d_flow_compact,
const float *d_Zbuffer_flow_compact, const int *d_ind_flow_Zbuffer,
const unsigned int *d_valid_flow_Zbuffer, float fx, float fy, float ox,
float oy, int n_rows, int n_cols, int n_valid_flow_Zbuffer,
const int *d_offset_ind, const int *d_segment_translation_table,
float w_flow, float w_ar_flow, const float *d_dTR) {
int ind = blockDim.x * blockIdx.x + threadIdx.x;
if (ind < n_valid_flow_Zbuffer) {
// determine current segment
int segment = d_segment_translation_table[d_valid_flow_Zbuffer[ind]];
// fetch flow and Zbuffer from global memory
float2 u = d_flow_compact[ind];
float disp = __fdividef(1.0f, d_Zbuffer_flow_compact[ind]);
// compute coordinates
int pixel_ind = d_ind_flow_Zbuffer[ind];
bool is_ar_flow = (pixel_ind >= (n_rows * n_cols));
pixel_ind -= (int)is_ar_flow * n_rows * n_cols;
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
x = x - ox;
y = y - oy;
// compute absolute residual
// here the weights will be introduced
float w = is_ar_flow ? w_ar_flow : w_flow;
int ind_out = ind + d_offset_ind[segment];
int s6 = segment * 6;
d_abs_res[ind_out] =
w * flow_absolute_residual(x, y, u.x, u.y, disp, fx, fy, d_dTR[s6],
d_dTR[s6 + 1], d_dTR[s6 + 2], d_dTR[s6 + 3],
d_dTR[s6 + 4], d_dTR[s6 + 5]);
}
}
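// Usage sketch (not part of the original source): a possible launch for
// flow_absolute_residual_scalable_GPU. One thread per valid flow/Zbuffer sample;
// the block size of 256 is an arbitrary choice.
static void example_launch_flow_abs_residual(
    float *d_abs_res, const float2 *d_flow_compact,
    const float *d_Zbuffer_flow_compact, const int *d_ind_flow_Zbuffer,
    const unsigned int *d_valid_flow_Zbuffer, float fx, float fy, float ox,
    float oy, int n_rows, int n_cols, int n_valid_flow_Zbuffer,
    const int *d_offset_ind, const int *d_segment_translation_table,
    float w_flow, float w_ar_flow, const float *d_dTR) {
  int threads = 256;
  int blocks = (n_valid_flow_Zbuffer + threads - 1) / threads;
  flow_absolute_residual_scalable_GPU<<<blocks, threads>>>(
      d_abs_res, d_flow_compact, d_Zbuffer_flow_compact, d_ind_flow_Zbuffer,
      d_valid_flow_Zbuffer, fx, fy, ox, oy, n_rows, n_cols, n_valid_flow_Zbuffer,
      d_offset_ind, d_segment_translation_table, w_flow, w_ar_flow, d_dTR);
}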
// Absolute residual for disparity multi-camera case
//__global__ void disp_absolute_residual_multicam_GPU(float *d_abs_res, float
//*d_disparity_compact, float4 *d_Zbuffer_normals_compact, int
//*d_ind_disparity_Zbuffer, unsigned int *d_valid_disparity_Zbuffer, const
//float* d_focal_length, const float* d_nodal_point_x, const float*
//d_nodal_point_y, const float * d_baseline, const int* d_n_cols, int
//n_valid_disparity_Zbuffer, const int *d_res_offset_ind, const int
//*d_pixel_ind_offset, const int *d_segment_translation_table, const float
//*d_dTR)
//{
// int ind = blockDim.x*blockIdx.x + threadIdx.x;
// if (ind < n_valid_disparity_Zbuffer) {
// // determine current segment
// int segment = d_segment_translation_table[d_valid_disparity_Zbuffer[ind]];
// // get segment parameters
// float f = d_focal_length[segment];
// float ox = d_nodal_point_x[segment];
// float oy = d_nodal_point_y[segment];
// float b = d_baseline[segment];
// int n_cols = d_n_cols[segment];
// int pixel_ind_offset = d_pixel_ind_offset[segment];
// int res_offset = d_res_offset_ind[segment];
// // fetch disparity, Zbuffer and normal from global memory
// float disp = d_disparity_compact[ind];
// float4 tmp = d_Zbuffer_normals_compact[ind];
// float Zbuffer = tmp.x;
// float nx = tmp.y;
// float ny = tmp.z;
// float nz = tmp.w;
// // compute coordinates
// int pixel_ind = d_ind_disparity_Zbuffer[ind] - pixel_ind_offset;
// float y = floorf(__fdividef( (float)pixel_ind , n_cols ));
// float x = (float)pixel_ind - y*n_cols;
// x = __fdividef( (x - ox) , f );
// y = -__fdividef( (y - oy) , f );
// // reconstruct 3D point from disparity
// float Zd = -(f*b)/disp;
// float Xd = x*Zd;
// float Yd = y*Zd;
// // reconstruct 3D point from model
// float Zm = Zbuffer;
// float Xm = x*Zm;
// float Ym = y*Zm;
// // compute absolute residual (weighted by disparity vs flow importance)
// int ind_out = ind + res_offset;
// int s6 = segment*6;
// d_abs_res[ind_out] = disp_absolute_residual(Xd, Yd, Zd, Xm, Ym, Zm, nx,
// ny, nz, d_dTR[s6], d_dTR[s6+1], d_dTR[s6+2], d_dTR[s6+3], d_dTR[s6+4],
// d_dTR[s6+5]);
// }
//}
// Absolute residual for disparity
__global__ void disp_absolute_residual_scalable_GPU(
float *d_abs_res, const float *d_disparity_compact,
const float4 *d_Zbuffer_normals_compact, const int *d_ind_disparity_Zbuffer,
const unsigned int *d_valid_disparity_Zbuffer, float fx, float fy, float ox,
float oy, float b, int n_cols, int n_valid_disparity_Zbuffer,
const int *d_offset_ind, const int *d_segment_translation_table,
float w_disp, const float *d_dTR) {
int ind = blockDim.x * blockIdx.x + threadIdx.x;
if (ind < n_valid_disparity_Zbuffer) {
// determine current segment
int segment = d_segment_translation_table[d_valid_disparity_Zbuffer[ind]];
// fetch disparity, Zbuffer and normal from global memory
float disp = d_disparity_compact[ind];
float4 tmp = d_Zbuffer_normals_compact[ind];
float Zbuffer = tmp.x;
float nx = tmp.y;
float ny = tmp.z;
float nz = tmp.w;
// compute coordinates
int pixel_ind = d_ind_disparity_Zbuffer[ind];
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
x = __fdividef((x - ox), fx);
y = __fdividef((y - oy), fy);
// reconstruct 3D point from disparity
float Zd = -(fx * b) / disp; // arbitrary use of fx for now
float Xd = x * Zd;
float Yd = y * Zd;
// reconstruct 3D point from model
float Zm = Zbuffer;
float Xm = x * Zm;
float Ym = y * Zm;
// compute absolute residual (weighted by disparity vs flow importance)
int ind_out = ind + d_offset_ind[segment];
int s6 = segment * 6;
d_abs_res[ind_out] =
w_disp * disp_absolute_residual(Xd, Yd, Zd, Xm, Ym, Zm, nx, ny, nz,
d_dTR[s6], d_dTR[s6 + 1], d_dTR[s6 + 2],
d_dTR[s6 + 3], d_dTR[s6 + 4],
d_dTR[s6 + 5], fx, b);
}
}
// Weighted normal equations for flow - multicam case
//__global__ void normal_eqs_flow_weighted_multicam_GPU(float *d_CO, float2
//*d_flow_compact, float *d_Zbuffer_flow_compact, int *d_ind_flow_Zbuffer, const
//float* d_focal_length, const float* d_nodal_point_x, const float*
//d_nodal_point_y, const int* d_n_rows, const int* d_n_cols, const int
//*d_n_values_flow, const int *d_start_ind_flow, const int *d_pixel_ind_offset,
//const float *d_abs_res_scales, const float *d_dTR)
//{
// int n_val_accum = gridDim.x*blockDim.x; // _MAX_N_VAL_ACCUM may not be
// multiple of blocksize
// int n_flow = d_n_values_flow[blockIdx.y];
// int n_accum = (int)ceilf((float)n_flow / (float)n_val_accum);
// int start_ind = d_start_ind_flow[blockIdx.y];
// float f = d_focal_length[blockIdx.y];
// float ox = d_nodal_point_x[blockIdx.y];
// float oy = d_nodal_point_y[blockIdx.y];
// int n_rows = d_n_rows[blockIdx.y];
// int n_cols = d_n_cols[blockIdx.y];
// int pixel_ind_offset = d_pixel_ind_offset[blockIdx.y];
// // initialize accumulators
// float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f, A6 =
// 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f, A12 = 0.0f,
// A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f, A17 = 0.0f, A18 = 0.0f, A19 =
// 0.0f, A20 = 0.0f, A21 = 0.0f, A22 = 0.0f;
// for (int in_ind = blockDim.x*blockIdx.x*n_accum + threadIdx.x ; in_ind <
// blockDim.x*(blockIdx.x+1)*n_accum ; in_ind += blockDim.x) {
// if (in_ind < n_flow ) { // is this a valid sample?
// // fetch flow and Zbuffer from global memory
// float2 u = d_flow_compact[in_ind+start_ind];
// float disp = __fdividef(1.0f,d_Zbuffer_flow_compact[in_ind+start_ind]);
// // compute coordinates
// int pixel_ind = d_ind_flow_Zbuffer[in_ind+start_ind] - pixel_ind_offset;
// bool is_ar_flow = (pixel_ind>=(n_rows*n_cols));
// pixel_ind -= (int)is_ar_flow*n_rows*n_cols;
// float y = floorf(__fdividef( (float)pixel_ind , n_cols ));
// float x = (float)pixel_ind - y*n_cols;
// x = x - ox;
// y = y - oy;
// // flip y axis
// y = -y;
// u.y = -u.y;
// // determine M-estimation weight
//// float w_rel = is_ar_flow ? w_ar_flow : w_flow;
// int s6 = blockIdx.y*6;
// float w = flow_absolute_residual(x, y, u.x, u.y, disp, f, d_dTR[s6],
// d_dTR[s6+1], d_dTR[s6+2], d_dTR[s6+3], d_dTR[s6+4], d_dTR[s6+5]);
// w /= d_abs_res_scales[blockIdx.y];
// w = (w>1) ? 0 : (1.0f-2.0f*w*w + w*w*w*w);
// /************************/
// /* evaluate constraints */
// /************************/
// // unique values A-matrix
// A0 += w * (disp*disp*f*f);
// A1 += w * (-disp*disp*x*f);
// A2 += w * (-disp*x*y);
// A3 += w * (disp*f*f + disp*x*x);
// A4 += w * (-disp*y*f);
// A5 += w * (-disp*disp*y*f);
// A6 += w * (-disp*f*f - disp*y*y);
// A7 += w * (disp*x*f);
// A8 += w * (disp*disp*x*x + disp*disp*y*y);
// A9 += w * (disp*x*x*y/f + disp*y*f + disp*y*y*y/f);
// A10 += w * (-disp*x*f - disp*x*x*x/f - disp*x*y*y/f);
// A11 += w * (x*x*y*y/(f*f) + f*f + 2.0f*y*y + y*y*y*y/(f*f));
// A12 += w * (-2.0f*x*y - x*x*x*y/(f*f) - x*y*y*y/(f*f));
// A13 += w * (-x*f);
// A14 += w * (f*f + 2.0f*x*x + x*x*x*x/(f*f) + x*x*y*y/(f*f));
// A15 += w * (-y*f);
// A16 += w * (x*x + y*y);
// // B-vector
// A17 += w * (disp*u.x*f);
// A18 += w * (disp*u.y*f);
// A19 += w * (-disp*x*u.x - disp*y*u.y);
// A20 += w * (-x*y*u.x/f - u.y*f - u.y*y*y/f);
// A21 += w * (u.x*f + x*x*u.x/f + x*y*u.y/f);
// A22 += w * (-y*u.x + x*u.y);
// }
// }
// /**************************/
// /* write out accumulators */
// /**************************/
// int out_ind = 23*n_val_accum*blockIdx.y + blockDim.x*blockIdx.x +
// threadIdx.x;
// d_CO[out_ind] = A0;
// d_CO[out_ind+n_val_accum] = A1;
// d_CO[out_ind+2*n_val_accum] = A2;
// d_CO[out_ind+3*n_val_accum] = A3;
// d_CO[out_ind+4*n_val_accum] = A4;
// d_CO[out_ind+5*n_val_accum] = A5;
// d_CO[out_ind+6*n_val_accum] = A6;
// d_CO[out_ind+7*n_val_accum] = A7;
// d_CO[out_ind+8*n_val_accum] = A8;
// d_CO[out_ind+9*n_val_accum] = A9;
// d_CO[out_ind+10*n_val_accum] = A10;
// d_CO[out_ind+11*n_val_accum] = A11;
// d_CO[out_ind+12*n_val_accum] = A12;
// d_CO[out_ind+13*n_val_accum] = A13;
// d_CO[out_ind+14*n_val_accum] = A14;
// d_CO[out_ind+15*n_val_accum] = A15;
// d_CO[out_ind+16*n_val_accum] = A16;
// d_CO[out_ind+17*n_val_accum] = A17;
// d_CO[out_ind+18*n_val_accum] = A18;
// d_CO[out_ind+19*n_val_accum] = A19;
// d_CO[out_ind+20*n_val_accum] = A20;
// d_CO[out_ind+21*n_val_accum] = A21;
// d_CO[out_ind+22*n_val_accum] = A22;
//}
// Weighted normal equations for flow
__global__ void normal_eqs_flow_weighted_GPU(
float *d_CO, const float2 *d_flow_compact,
const float *d_Zbuffer_flow_compact, const int *d_ind_flow_Zbuffer,
float fx, float fy, float ox, float oy, int n_rows, int n_cols,
const int *d_n_values_flow, const int *d_start_ind_flow,
const float *d_abs_res_scales, float w_flow, float w_ar_flow,
const float *d_dTR) {
int n_val_accum = gridDim.x * blockDim.x; // _MAX_N_VAL_ACCUM may not be
// multiple of blocksize
int n_flow = d_n_values_flow[blockIdx.y];
int n_accum = (int)ceilf((float)n_flow / (float)n_val_accum);
int start_ind = d_start_ind_flow[blockIdx.y];
// initialize accumulators
float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f,
A6 = 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f,
A12 = 0.0f, A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f, A17 = 0.0f,
A18 = 0.0f, A19 = 0.0f, A20 = 0.0f, A21 = 0.0f, A22 = 0.0f;
for (int in_ind = blockDim.x * blockIdx.x * n_accum + threadIdx.x;
in_ind < blockDim.x * (blockIdx.x + 1) * n_accum; in_ind += blockDim.x) {
if (in_ind < n_flow) { // is this a valid sample?
// fetch flow and Zbuffer from global memory
float2 u = d_flow_compact[in_ind + start_ind];
float disp = __fdividef(1.0f, d_Zbuffer_flow_compact[in_ind + start_ind]);
// compute coordinates
int pixel_ind = d_ind_flow_Zbuffer[in_ind + start_ind];
bool is_ar_flow = (pixel_ind >= (n_rows * n_cols));
pixel_ind -= (int)is_ar_flow * n_rows * n_cols;
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
x = x - ox;
y = y - oy;
// determine M-estimation weight
float w_rel = is_ar_flow ? w_ar_flow : w_flow;
int s6 = blockIdx.y * 6;
float w = w_rel * flow_absolute_residual(x, y, u.x, u.y, disp, fx, fy,
d_dTR[s6], d_dTR[s6 + 1],
d_dTR[s6 + 2], d_dTR[s6 + 3],
d_dTR[s6 + 4], d_dTR[s6 + 5]);
w /= d_abs_res_scales[blockIdx.y];
w = (w > 1) ? 0 : (1.0f - 2.0f * w * w + w * w * w * w);
/************************/
/* evaluate constraints */
/************************/
// unique values A-matrix
A0 += w * (disp * disp * fx * fx);
A1 += w * (-disp * disp * x * fx);
A2 += w * (-disp * x * y);
A3 += w * (disp * fx * fx + disp * x * x);
A4 += w * (-disp * y * fx);
A5 += w * (-disp * disp * y * fy);
A6 += w * (-disp * fy * fy - disp * y * y); //!!!!
A7 += w * (disp * x * fy);
A8 += w * (disp * disp * x * x + disp * disp * y * y);
A9 += w * (disp * x * x * y / fx + disp * y * fy + disp * y * y * y / fy);
A10 +=
w * (-disp * x * fx - disp * x * x * x / fx - disp * x * y * y / fy);
A11 += w * (x * x * y * y / (fx * fx) + fy * fy + 2.0f * y * y +
y * y * y * y / (fy * fy));
A12 += w * (-2.0f * x * y - x * x * x * y / (fx * fx) -
x * y * y * y / (fy * fy));
A13 += w * (x * y * y / fx - x * fy - x * y * y / fy);
A14 += w * (fx * fx + 2.0f * x * x + x * x * x * x / (fx * fx) +
x * x * y * y / (fy * fy));
A15 += w * (-y * fx - x * x * y / fx + x * x * y / fy);
A16 += w * (x * x + y * y);
// B-vector
A17 += w * (disp * u.x * fx);
A18 += w * (disp * u.y * fy);
A19 += w * (-disp * x * u.x - disp * y * u.y);
A20 += w * (-x * y * u.x / fx - u.y * fy - u.y * y * y / fy);
A21 += w * (u.x * fx + x * x * u.x / fx + x * y * u.y / fy);
A22 += w * (-y * u.x + x * u.y);
}
}
/**************************/
/* write out accumulators */
/**************************/
int out_ind =
23 * n_val_accum * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_CO[out_ind] = A0;
d_CO[out_ind + n_val_accum] = A1;
d_CO[out_ind + 2 * n_val_accum] = A2;
d_CO[out_ind + 3 * n_val_accum] = A3;
d_CO[out_ind + 4 * n_val_accum] = A4;
d_CO[out_ind + 5 * n_val_accum] = A5;
d_CO[out_ind + 6 * n_val_accum] = A6;
d_CO[out_ind + 7 * n_val_accum] = A7;
d_CO[out_ind + 8 * n_val_accum] = A8;
d_CO[out_ind + 9 * n_val_accum] = A9;
d_CO[out_ind + 10 * n_val_accum] = A10;
d_CO[out_ind + 11 * n_val_accum] = A11;
d_CO[out_ind + 12 * n_val_accum] = A12;
d_CO[out_ind + 13 * n_val_accum] = A13;
d_CO[out_ind + 14 * n_val_accum] = A14;
d_CO[out_ind + 15 * n_val_accum] = A15;
d_CO[out_ind + 16 * n_val_accum] = A16;
d_CO[out_ind + 17 * n_val_accum] = A17;
d_CO[out_ind + 18 * n_val_accum] = A18;
d_CO[out_ind + 19 * n_val_accum] = A19;
d_CO[out_ind + 20 * n_val_accum] = A20;
d_CO[out_ind + 21 * n_val_accum] = A21;
d_CO[out_ind + 22 * n_val_accum] = A22;
}
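// -----------------------------------------------------------------------------
// Note added for clarity (not part of the original file): the M-estimation
// weight used in the weighted normal-equation kernels above and below is the
// Tukey biweight applied to a residual that has already been divided by its
// scale estimate: (1 - r^2)^2 = 1 - 2r^2 + r^4 for r <= 1, and 0 otherwise.
// A minimal standalone sketch of that weighting, with an illustrative name:
__device__ __forceinline__ float tukey_biweight(float scaled_abs_residual) {
  float r = scaled_abs_residual;
  // Residuals beyond the scale are rejected outright; inliers get (1 - r^2)^2.
  return (r > 1.0f) ? 0.0f : (1.0f - 2.0f * r * r + r * r * r * r);
}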
// Weighted normal equations for disparity - multicam case
//__global__ void normal_eqs_disparity_weighted_multicam_GPU(float *d_CD, float
//*d_disparity_compact, float4 *d_Zbuffer_normals_compact, int
//*d_ind_disparity_Zbuffer, const float* d_focal_length, const float*
//d_nodal_point_x, const float* d_nodal_point_y, const float* d_baseline, const
//int* d_n_cols, const int *d_n_values_disparity, const int
//*d_start_ind_disparity, const int *d_pixel_ind_offset, const float
//*d_abs_res_scales, const float *d_dTR)
//{
// int n_val_accum = gridDim.x*blockDim.x; // n_val_accum may not be multiple
// of blocksize
// int n_disparity = d_n_values_disparity[blockIdx.y];
// int n_accum = (int)ceilf((float)n_disparity / (float)n_val_accum);
// int start_ind = d_start_ind_disparity[blockIdx.y];
// float f = d_focal_length[blockIdx.y];
// float ox = d_nodal_point_x[blockIdx.y];
// float oy = d_nodal_point_y[blockIdx.y];
// float b = d_baseline[blockIdx.y];
// int n_cols = d_n_cols[blockIdx.y];
// int pixel_ind_offset = d_pixel_ind_offset[blockIdx.y];
// // initialize accumulators
// float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f, A6 =
// 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f, A12 = 0.0f,
// A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f, A17 = 0.0f, A18 = 0.0f, A19 =
// 0.0f, A20 = 0.0f, A21 = 0.0f, A22 = 0.0f, A23 = 0.0f, A24 = 0.0f, A25 = 0.0f,
// A26 = 0.0f;
// for (int in_ind = blockDim.x*blockIdx.x*n_accum + threadIdx.x ; in_ind <
// blockDim.x*(blockIdx.x+1)*n_accum ; in_ind += blockDim.x) {
// if (in_ind < n_disparity ) { // is this a valid sample?
// // fetch disparity, Zbuffer and normal from global memory
// float disp = d_disparity_compact[in_ind+start_ind];
// float4 tmp = d_Zbuffer_normals_compact[in_ind+start_ind];
// float Zbuffer = tmp.x;
// float nx = tmp.y;
// float ny = tmp.z;
// float nz = tmp.w;
// // compute coordinates
// int pixel_ind = d_ind_disparity_Zbuffer[in_ind+start_ind] -
// pixel_ind_offset;
// float y = floorf(__fdividef( (float)pixel_ind , n_cols ));
// float x = (float)pixel_ind - y*n_cols;
// x = __fdividef( (x - ox) , f );
// y = -__fdividef( (y - oy) , f );
// // reconstruct 3D point from disparity
// float Zd = -(f*b)/disp;
// float Xd = x*Zd;
// float Yd = y*Zd;
// // reconstruct 3D point from model
// float Zm = Zbuffer;
// float Xm = x*Zm;
// float Ym = y*Zm;
// // determine M-estimation weight
// // disparity residual weighted by rel. importance disp vs flow
// int s6 = blockIdx.y*6;
// float w = disp_absolute_residual(Xd, Yd, Zd, Xm, Ym, Zm, nx, ny, nz,
// d_dTR[s6], d_dTR[s6+1], d_dTR[s6+2], d_dTR[s6+3], d_dTR[s6+4], d_dTR[s6+5]);
// w /= d_abs_res_scales[blockIdx.y];
// w = (w>1) ? 0 : (1.0f-2.0f*w*w + w*w*w*w);
// /************************/
// /* evaluate constraints */
// /************************/
// // unique values A-matrix
// A0 += w * (nx*nx);
// A1 += w * (nx*ny);
// A2 += w * (nx*nz);
// A3 += w * (Ym*nx*nz-Zm*nx*ny);
// A4 += w * (Zm*(nx*nx)-Xm*nx*nz);
// A5 += w * (-Ym*(nx*nx)+Xm*nx*ny);
// A6 += w * (ny*ny);
// A7 += w * (ny*nz);
// A8 += w * (-Zm*(ny*ny)+Ym*ny*nz);
// A9 += w * (-Xm*ny*nz+Zm*nx*ny);
// A10 += w * (Xm*(ny*ny)-Ym*nx*ny);
// A11 += w * (nz*nz);
// A12 += w * (Ym*(nz*nz)-Zm*ny*nz);
// A13 += w * (-Xm*(nz*nz)+Zm*nx*nz);
// A14 += w * (Xm*ny*nz-Ym*nx*nz);
// A15 += w * ((Ym*Ym)*(nz*nz)+(Zm*Zm)*(ny*ny)-Ym*Zm*ny*nz*2.0f);
// A16 += w * (-Xm*Ym*(nz*nz)-(Zm*Zm)*nx*ny+Xm*Zm*ny*nz+Ym*Zm*nx*nz);
// A17 += w * (-Xm*Zm*(ny*ny)-(Ym*Ym)*nx*nz+Xm*Ym*ny*nz+Ym*Zm*nx*ny);
// A18 += w * ((Xm*Xm)*(nz*nz)+(Zm*Zm)*(nx*nx)-Xm*Zm*nx*nz*2.0f);
// A19 += w * (-Ym*Zm*(nx*nx)-(Xm*Xm)*ny*nz+Xm*Ym*nx*nz+Xm*Zm*nx*ny);
// A20 += w * ((Xm*Xm)*(ny*ny)+(Ym*Ym)*(nx*nx)-Xm*Ym*nx*ny*2.0f);
// // B-vector
// A21 += w * (Xd*(nx*nx)-Xm*(nx*nx)+Yd*nx*ny-Ym*nx*ny+Zd*nx*nz-Zm*nx*nz);
// A22 += w * (Yd*(ny*ny)-Ym*(ny*ny)+Xd*nx*ny-Xm*nx*ny+Zd*ny*nz-Zm*ny*nz);
// A23 += w * (Zd*(nz*nz)-Zm*(nz*nz)+Xd*nx*nz-Xm*nx*nz+Yd*ny*nz-Ym*ny*nz);
// A24 += w *
// (-Yd*Zm*(ny*ny)+Ym*Zd*(nz*nz)+Ym*Zm*(ny*ny)-Ym*Zm*(nz*nz)-(Ym*Ym)*ny*nz+(Zm*Zm)*ny*nz+Xd*Ym*nx*nz-Xm*Ym*nx*nz-Xd*Zm*nx*ny+Yd*Ym*ny*nz+Xm*Zm*nx*ny-Zd*Zm*ny*nz);
// A25 += w *
// (Xd*Zm*(nx*nx)-Xm*Zd*(nz*nz)-Xm*Zm*(nx*nx)+Xm*Zm*(nz*nz)+(Xm*Xm)*nx*nz-(Zm*Zm)*nx*nz-Xd*Xm*nx*nz-Xm*Yd*ny*nz+Xm*Ym*ny*nz+Yd*Zm*nx*ny-Ym*Zm*nx*ny+Zd*Zm*nx*nz);
// A26 += w *
// (-Xd*Ym*(nx*nx)+Xm*Yd*(ny*ny)+Xm*Ym*(nx*nx)-Xm*Ym*(ny*ny)-(Xm*Xm)*nx*ny+(Ym*Ym)*nx*ny+Xd*Xm*nx*ny-Yd*Ym*nx*ny+Xm*Zd*ny*nz-Xm*Zm*ny*nz-Ym*Zd*nx*nz+Ym*Zm*nx*nz);
// }
// }
// /**************************/
// /* write out accumulators */
// /**************************/
// int out_ind = 27*n_val_accum*blockIdx.y + blockDim.x*blockIdx.x +
// threadIdx.x;
// d_CD[out_ind] = A0;
// d_CD[out_ind+n_val_accum] = A1;
// d_CD[out_ind+2*n_val_accum] = A2;
// d_CD[out_ind+3*n_val_accum] = A3;
// d_CD[out_ind+4*n_val_accum] = A4;
// d_CD[out_ind+5*n_val_accum] = A5;
// d_CD[out_ind+6*n_val_accum] = A6;
// d_CD[out_ind+7*n_val_accum] = A7;
// d_CD[out_ind+8*n_val_accum] = A8;
// d_CD[out_ind+9*n_val_accum] = A9;
// d_CD[out_ind+10*n_val_accum] = A10;
// d_CD[out_ind+11*n_val_accum] = A11;
// d_CD[out_ind+12*n_val_accum] = A12;
// d_CD[out_ind+13*n_val_accum] = A13;
// d_CD[out_ind+14*n_val_accum] = A14;
// d_CD[out_ind+15*n_val_accum] = A15;
// d_CD[out_ind+16*n_val_accum] = A16;
// d_CD[out_ind+17*n_val_accum] = A17;
// d_CD[out_ind+18*n_val_accum] = A18;
// d_CD[out_ind+19*n_val_accum] = A19;
// d_CD[out_ind+20*n_val_accum] = A20;
// d_CD[out_ind+21*n_val_accum] = A21;
// d_CD[out_ind+22*n_val_accum] = A22;
// d_CD[out_ind+23*n_val_accum] = A23;
// d_CD[out_ind+24*n_val_accum] = A24;
// d_CD[out_ind+25*n_val_accum] = A25;
// d_CD[out_ind+26*n_val_accum] = A26;
//}
// Weighted normal equations for disparity
__global__ void normal_eqs_disparity_weighted_GPU(
float *d_CD, const float *d_disparity_compact,
const float4 *d_Zbuffer_normals_compact, const int *d_ind_disparity_Zbuffer,
float fx, float fy, float ox, float oy, float b, int n_cols,
const int *d_n_values_disparity, const int *d_start_ind_disparity,
const float *d_abs_res_scales, float w_disp, const float *d_dTR) {
int n_val_accum =
gridDim.x * blockDim.x; // n_val_accum may not be multiple of blocksize
int n_disparity = d_n_values_disparity[blockIdx.y];
int n_accum = (int)ceilf((float)n_disparity / (float)n_val_accum);
int start_ind = d_start_ind_disparity[blockIdx.y];
// initialize accumulators
float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f,
A6 = 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f,
A12 = 0.0f, A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f, A17 = 0.0f,
A18 = 0.0f, A19 = 0.0f, A20 = 0.0f, A21 = 0.0f, A22 = 0.0f, A23 = 0.0f,
A24 = 0.0f, A25 = 0.0f, A26 = 0.0f;
for (int in_ind = blockDim.x * blockIdx.x * n_accum + threadIdx.x;
in_ind < blockDim.x * (blockIdx.x + 1) * n_accum; in_ind += blockDim.x) {
if (in_ind < n_disparity) { // is this a valid sample?
// fetch disparity, Zbuffer and normal from global memory
float disp = d_disparity_compact[in_ind + start_ind];
float4 tmp = d_Zbuffer_normals_compact[in_ind + start_ind];
float Zbuffer = tmp.x;
float nx = tmp.y;
float ny = tmp.z;
float nz = tmp.w;
// compute coordinates
int pixel_ind = d_ind_disparity_Zbuffer[in_ind + start_ind];
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
x = __fdividef((x - ox), fx);
y = __fdividef((y - oy), fy);
// reconstruct 3D point from disparity
float Zd = -(fx * b) / disp; // arbitrary use of fx
float Xd = x * Zd;
float Yd = y * Zd;
// reconstruct 3D point from model
float Zm = Zbuffer;
float Xm = x * Zm;
float Ym = y * Zm;
// determine M-estimation weight
      // disparity residual weighted by rel. importance disp vs flow
int s6 = blockIdx.y * 6;
float w = w_disp * disp_absolute_residual(
Xd, Yd, Zd, Xm, Ym, Zm, nx, ny, nz, d_dTR[s6],
d_dTR[s6 + 1], d_dTR[s6 + 2], d_dTR[s6 + 3],
d_dTR[s6 + 4], d_dTR[s6 + 5], fx, b);
w /= d_abs_res_scales[blockIdx.y];
w = (w > 1) ? 0 : (1.0f - 2.0f * w * w + w * w * w * w);
// multiply m estimation weight with distance->pixel conversion weight
// (squared)
w *= (fx * fx * b * b) / (Zm * Zm * Zm * Zm);
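      // (Note added for clarity: with disparity d = fx*b/Z, |dd/dZ| = fx*b/Z^2,
      // so squaring that Jacobian converts the squared metric depth residual
      // into an (approximate) squared disparity residual in pixels.)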
/************************/
/* evaluate constraints */
/************************/
// unique values A-matrix
A0 += w * (nx * nx);
A1 += w * (nx * ny);
A2 += w * (nx * nz);
A3 += w * (Ym * nx * nz - Zm * nx * ny);
A4 += w * (Zm * (nx * nx) - Xm * nx * nz);
A5 += w * (-Ym * (nx * nx) + Xm * nx * ny);
A6 += w * (ny * ny);
A7 += w * (ny * nz);
A8 += w * (-Zm * (ny * ny) + Ym * ny * nz);
A9 += w * (-Xm * ny * nz + Zm * nx * ny);
A10 += w * (Xm * (ny * ny) - Ym * nx * ny);
A11 += w * (nz * nz);
A12 += w * (Ym * (nz * nz) - Zm * ny * nz);
A13 += w * (-Xm * (nz * nz) + Zm * nx * nz);
A14 += w * (Xm * ny * nz - Ym * nx * nz);
A15 += w * ((Ym * Ym) * (nz * nz) + (Zm * Zm) * (ny * ny) -
Ym * Zm * ny * nz * 2.0f);
A16 += w * (-Xm * Ym * (nz * nz) - (Zm * Zm) * nx * ny +
Xm * Zm * ny * nz + Ym * Zm * nx * nz);
A17 += w * (-Xm * Zm * (ny * ny) - (Ym * Ym) * nx * nz +
Xm * Ym * ny * nz + Ym * Zm * nx * ny);
A18 += w * ((Xm * Xm) * (nz * nz) + (Zm * Zm) * (nx * nx) -
Xm * Zm * nx * nz * 2.0f);
A19 += w * (-Ym * Zm * (nx * nx) - (Xm * Xm) * ny * nz +
Xm * Ym * nx * nz + Xm * Zm * nx * ny);
A20 += w * ((Xm * Xm) * (ny * ny) + (Ym * Ym) * (nx * nx) -
Xm * Ym * nx * ny * 2.0f);
// B-vector
A21 += w * (Xd * (nx * nx) - Xm * (nx * nx) + Yd * nx * ny -
Ym * nx * ny + Zd * nx * nz - Zm * nx * nz);
A22 += w * (Yd * (ny * ny) - Ym * (ny * ny) + Xd * nx * ny -
Xm * nx * ny + Zd * ny * nz - Zm * ny * nz);
A23 += w * (Zd * (nz * nz) - Zm * (nz * nz) + Xd * nx * nz -
Xm * nx * nz + Yd * ny * nz - Ym * ny * nz);
A24 += w *
(-Yd * Zm * (ny * ny) + Ym * Zd * (nz * nz) + Ym * Zm * (ny * ny) -
Ym * Zm * (nz * nz) - (Ym * Ym) * ny * nz + (Zm * Zm) * ny * nz +
Xd * Ym * nx * nz - Xm * Ym * nx * nz - Xd * Zm * nx * ny +
Yd * Ym * ny * nz + Xm * Zm * nx * ny - Zd * Zm * ny * nz);
A25 +=
w * (Xd * Zm * (nx * nx) - Xm * Zd * (nz * nz) - Xm * Zm * (nx * nx) +
Xm * Zm * (nz * nz) + (Xm * Xm) * nx * nz - (Zm * Zm) * nx * nz -
Xd * Xm * nx * nz - Xm * Yd * ny * nz + Xm * Ym * ny * nz +
Yd * Zm * nx * ny - Ym * Zm * nx * ny + Zd * Zm * nx * nz);
A26 += w *
(-Xd * Ym * (nx * nx) + Xm * Yd * (ny * ny) + Xm * Ym * (nx * nx) -
Xm * Ym * (ny * ny) - (Xm * Xm) * nx * ny + (Ym * Ym) * nx * ny +
Xd * Xm * nx * ny - Yd * Ym * nx * ny + Xm * Zd * ny * nz -
Xm * Zm * ny * nz - Ym * Zd * nx * nz + Ym * Zm * nx * nz);
}
}
/**************************/
/* write out accumulators */
/**************************/
int out_ind =
27 * n_val_accum * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
w_disp *= w_disp; // weight relative to flow
d_CD[out_ind] = w_disp * A0;
d_CD[out_ind + n_val_accum] = w_disp * A1;
d_CD[out_ind + 2 * n_val_accum] = w_disp * A2;
d_CD[out_ind + 3 * n_val_accum] = w_disp * A3;
d_CD[out_ind + 4 * n_val_accum] = w_disp * A4;
d_CD[out_ind + 5 * n_val_accum] = w_disp * A5;
d_CD[out_ind + 6 * n_val_accum] = w_disp * A6;
d_CD[out_ind + 7 * n_val_accum] = w_disp * A7;
d_CD[out_ind + 8 * n_val_accum] = w_disp * A8;
d_CD[out_ind + 9 * n_val_accum] = w_disp * A9;
d_CD[out_ind + 10 * n_val_accum] = w_disp * A10;
d_CD[out_ind + 11 * n_val_accum] = w_disp * A11;
d_CD[out_ind + 12 * n_val_accum] = w_disp * A12;
d_CD[out_ind + 13 * n_val_accum] = w_disp * A13;
d_CD[out_ind + 14 * n_val_accum] = w_disp * A14;
d_CD[out_ind + 15 * n_val_accum] = w_disp * A15;
d_CD[out_ind + 16 * n_val_accum] = w_disp * A16;
d_CD[out_ind + 17 * n_val_accum] = w_disp * A17;
d_CD[out_ind + 18 * n_val_accum] = w_disp * A18;
d_CD[out_ind + 19 * n_val_accum] = w_disp * A19;
d_CD[out_ind + 20 * n_val_accum] = w_disp * A20;
d_CD[out_ind + 21 * n_val_accum] = w_disp * A21;
d_CD[out_ind + 22 * n_val_accum] = w_disp * A22;
d_CD[out_ind + 23 * n_val_accum] = w_disp * A23;
d_CD[out_ind + 24 * n_val_accum] = w_disp * A24;
d_CD[out_ind + 25 * n_val_accum] = w_disp * A25;
d_CD[out_ind + 26 * n_val_accum] = w_disp * A26;
}
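// Sketch added for illustration (not part of the original file): how the
// partial sums written by the kernel above could be reduced for one segment on
// the host. Each of the 27 disparity constraints occupies a contiguous block of
// n_val_accum entries; the real code instead reduces on the device through
// reduce_normal_eqs_64_mult_constr_GPU. Function and parameter names here are
// illustrative only.
inline void reduce_disparity_constraints_host(const float *h_CD, float *h_C_out,
                                              int n_val_accum, int segment) {
  for (int c = 0; c < 27; ++c) {
    float sum = 0.0f;
    const float *base = h_CD + 27 * n_val_accum * segment + c * n_val_accum;
    for (int i = 0; i < n_val_accum; ++i)
      sum += base[i];
    h_C_out[c] = sum;
  }
}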
///////////////////////
// //
// KERNEL WRAPPERS //
// //
///////////////////////
void computeResidualFlow(float *d_res_flowx, float *d_res_flowy,
float *d_res_ar_flowx, float *d_res_ar_flowy,
const float *d_flowx, const float *d_flowy,
const float *d_ar_flowx, const float *d_ar_flowy,
const float *d_delta_T_accum,
const float *d_delta_Rmat_accum, const float *d_init_Z,
const cudaArray *d_segment_ind, int n_cols, int n_rows,
float nodal_point_x, float nodal_point_y,
float focal_length_x, float focal_length_y) {
// Bind textures to arrays
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_segmentINDArray_texture, d_segment_ind,
channelFloat);
dim3 TB(16, 12, 1);
dim3 BG(divUp(n_cols, TB.x), divUp(n_rows, TB.y));
compute_residual_flow_GPU << <BG, TB>>>
(d_res_flowx, d_res_flowy, d_flowx, d_flowy, d_delta_T_accum,
d_delta_Rmat_accum, d_init_Z, n_cols, n_rows, nodal_point_x,
nodal_point_y, focal_length_x, focal_length_y);
compute_residual_flow_GPU << <BG, TB>>>
(d_res_ar_flowx, d_res_ar_flowy, d_ar_flowx, d_ar_flowy, d_delta_T_accum,
d_delta_Rmat_accum, d_init_Z, n_cols, n_rows, nodal_point_x,
nodal_point_y, focal_length_x, focal_length_y);
cudaUnbindTexture(d_segmentINDArray_texture);
}
void markValidFlowZbufferAndZbufferZeroBased(
unsigned int *d_valid_ar_flow_Zbuffer, unsigned int *d_valid_Zbuffer,
const float *d_ar_flowx, const cudaArray *d_segmentINDArray, int n_cols,
int n_rows, int n_objects) {
// Bind textures to arrays
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_segmentINDArray_texture, d_segmentINDArray,
channelFloat);
// Mark valid locations
dim3 threadBlock_mark(16, 16, 1);
dim3 blockGrid_mark(divUp(n_cols, threadBlock_mark.x),
divUp(n_rows, threadBlock_mark.y));
mark_valid_flow_Zbuffer_and_Zbuffer_zero_based_GPU
<< <blockGrid_mark, threadBlock_mark>>>
(d_valid_ar_flow_Zbuffer, d_valid_Zbuffer, d_ar_flowx, n_cols, n_rows,
n_objects);
cudaUnbindTexture(d_segmentINDArray_texture);
}
void mark_with_zero_based_segmentIND(
unsigned int *d_valid_flow_Zbuffer, unsigned int *d_valid_disparity_Zbuffer,
const float *d_flowx, const float *d_ar_flowx, const char *d_disparity,
const cudaArray *d_segmentINDArray, int n_cols, int n_rows, int n_objects,
int d_disparity_pitch, bool mark_flow, bool mark_ar_flow,
bool mark_disparity, int segments_to_update) {
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_segmentINDArray_texture, d_segmentINDArray,
channelFloat);
dim3 threadBlock(16, 16, 1);
dim3 blockGrid(divUp(n_cols, threadBlock.x), divUp(n_rows, threadBlock.y));
mark_with_zero_based_segmentIND_GPU << <blockGrid, threadBlock>>>
(d_valid_flow_Zbuffer, d_valid_disparity_Zbuffer, d_flowx, d_ar_flowx,
d_disparity, n_cols, n_rows, n_objects, d_disparity_pitch, mark_flow,
mark_ar_flow, mark_disparity, segments_to_update);
cudaUnbindTexture(d_segmentINDArray_texture);
}
void subsample_ind_and_labels(int *d_ind_sub, const int *d_ind,
unsigned int *d_label_sub,
const unsigned int *d_label, int n_out,
float inv_sub_factor) {
dim3 threadBlock(256, 1);
dim3 blockGrid(divUp(n_out, threadBlock.x), 1);
subsample_ind_and_labels_GPU << <blockGrid, threadBlock>>>
(d_ind_sub, d_ind, d_label_sub, d_label, n_out, inv_sub_factor);
}
void gather_valid_flow_Zbuffer(float2 *d_flow_compact, float *d_Zbuffer_compact,
const float *d_flowx, const float *d_flowy,
const float *d_ar_flowx, const float *d_ar_flowy,
int *d_ind_flow_Zbuffer,
const cudaArray *d_ZbufferArray,
int n_valid_flow_Zbuffer, int n_cols, int n_rows,
float Z_conv1, float Z_conv2,
int ind_flow_offset) {
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_Zbuffer_texture, d_ZbufferArray, channelFloat);
dim3 threadBlock(256, 1);
dim3 blockGrid(divUp(n_valid_flow_Zbuffer, threadBlock.x), 1);
gather_valid_flow_Zbuffer_GPU << <blockGrid, threadBlock>>>
(d_flow_compact, d_Zbuffer_compact, d_flowx, d_flowy, d_ar_flowx,
d_ar_flowy, d_ind_flow_Zbuffer, n_valid_flow_Zbuffer, n_cols, n_rows,
Z_conv1, Z_conv2, ind_flow_offset);
cudaUnbindTexture(d_Zbuffer_texture);
}
void gather_valid_disparity_Zbuffer(
float *d_disparity_compact, float4 *d_Zbuffer_normals_compact,
const char *d_disparity, int *d_ind_disparity_Zbuffer,
const cudaArray *d_ZbufferArray, const cudaArray *d_normalXArray,
const cudaArray *d_normalYArray, const cudaArray *d_normalZArray,
int n_valid_disparity_Zbuffer, int n_cols, int n_rows, float Z_conv1,
float Z_conv2, int disparity_pitch, int ind_disp_offset) {
cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
cudaBindTextureToArray(d_Zbuffer_texture, d_ZbufferArray, channelFloat);
cudaBindTextureToArray(d_normalXArray_texture, d_normalXArray, channelFloat);
cudaBindTextureToArray(d_normalYArray_texture, d_normalYArray, channelFloat);
cudaBindTextureToArray(d_normalZArray_texture, d_normalZArray, channelFloat);
dim3 threadBlock(256, 1);
dim3 blockGrid(divUp(n_valid_disparity_Zbuffer, threadBlock.x), 1);
gather_valid_disparity_Zbuffer_GPU << <blockGrid, threadBlock>>>
(d_disparity_compact, d_Zbuffer_normals_compact, d_disparity,
d_ind_disparity_Zbuffer, n_valid_disparity_Zbuffer, n_cols, n_rows,
Z_conv1, Z_conv2, disparity_pitch, ind_disp_offset);
cudaUnbindTexture(d_normalZArray_texture);
cudaUnbindTexture(d_normalYArray_texture);
cudaUnbindTexture(d_normalXArray_texture);
cudaUnbindTexture(d_Zbuffer_texture);
}
void normal_eqs_disparity(dim3 blockGrid, dim3 threadBlock, float *d_CD,
const float *d_disparity_compact,
const float4 *d_Zbuffer_normals_compact,
const int *d_ind_disparity_Zbuffer, float fx,
float fy, float ox, float oy, float b, int n_cols,
const int *d_n_values_disparity,
const int *d_start_ind_disparity, float w_disp) {
normal_eqs_disparity_GPU << <blockGrid, threadBlock>>>
(d_CD, d_disparity_compact, d_Zbuffer_normals_compact,
d_ind_disparity_Zbuffer, fx, fy, ox, oy, b, n_cols, d_n_values_disparity,
d_start_ind_disparity, w_disp);
}
void reduce_normal_eqs_64_mult_constr(dim3 blockGrid, dim3 threadBlock,
float *d_C_reduced, const float *d_C,
int gridDim_x_normal_equations,
int n_constraints) {
reduce_normal_eqs_64_mult_constr_GPU << <blockGrid, threadBlock>>>
(d_C_reduced, d_C, gridDim_x_normal_equations, n_constraints);
}
void flow_absolute_residual_scalable(
dim3 blockGrid, dim3 threadBlock, float *d_abs_res,
const float2 *d_flow_compact, const float *d_Zbuffer_flow_compact,
const int *d_ind_flow_Zbuffer, const unsigned int *d_valid_flow_Zbuffer,
float fx, float fy, float ox, float oy, int n_rows, int n_cols,
int n_valid_flow_Zbuffer, const int *d_offset_ind,
const int *d_segment_translation_table, float w_flow, float w_ar_flow,
const float *d_dTR) {
flow_absolute_residual_scalable_GPU << <blockGrid, threadBlock>>>
(d_abs_res, d_flow_compact, d_Zbuffer_flow_compact, d_ind_flow_Zbuffer,
d_valid_flow_Zbuffer, fx, fy, ox, oy, n_rows, n_cols,
n_valid_flow_Zbuffer, d_offset_ind, d_segment_translation_table, w_flow,
w_ar_flow, d_dTR);
}
void disp_absolute_residual_scalable(
dim3 blockGrid, dim3 threadBlock, float *d_abs_res,
const float *d_disparity_compact, const float4 *d_Zbuffer_normals_compact,
const int *d_ind_disparity_Zbuffer,
const unsigned int *d_valid_disparity_Zbuffer, float fx, float fy, float ox,
float oy, float b, int n_cols, int n_valid_disparity_Zbuffer,
const int *d_offset_ind, const int *d_segment_translation_table,
float w_disp, const float *d_dTR) {
disp_absolute_residual_scalable_GPU << <blockGrid, threadBlock>>>
(d_abs_res, d_disparity_compact, d_Zbuffer_normals_compact,
d_ind_disparity_Zbuffer, d_valid_disparity_Zbuffer, fx, fy, ox, oy, b,
n_cols, n_valid_disparity_Zbuffer, d_offset_ind,
d_segment_translation_table, w_disp, d_dTR);
}
void normal_eqs_flow(dim3 blockGrid, dim3 threadBlock, float *d_CO,
const float2 *d_flow_compact,
const float *d_Zbuffer_flow_compact,
const int *d_ind_flow_Zbuffer, float fx, float fy,
float ox, float oy, int n_rows, int n_cols,
const int *d_n_values_flow, const int *d_start_ind_flow) {
  // HACK (need to fix): instability arises here when focal lengths are unequal
fx = (fx + fy) / 2.0f;
fy = fx;
normal_eqs_flow_GPU << <blockGrid, threadBlock>>>
(d_CO, d_flow_compact, d_Zbuffer_flow_compact, d_ind_flow_Zbuffer, fx, fy,
ox, oy, n_rows, n_cols, d_n_values_flow, d_start_ind_flow);
}
void normal_eqs_flow_weighted(dim3 blockGrid, dim3 threadBlock, float *d_CO,
const float2 *d_flow_compact,
const float *d_Zbuffer_flow_compact,
const int *d_ind_flow_Zbuffer, float fx, float fy,
float ox, float oy, int n_rows, int n_cols,
const int *d_n_values_flow,
const int *d_start_ind_flow,
const float *d_abs_res_scales, float w_flow,
float w_ar_flow, const float *d_dTR) {
  // HACK (need to fix): instability arises here when focal lengths are unequal
fx = (fx + fy) / 2.0f;
fy = fx;
normal_eqs_flow_weighted_GPU << <blockGrid, threadBlock>>>
(d_CO, d_flow_compact, d_Zbuffer_flow_compact, d_ind_flow_Zbuffer, fx, fy,
ox, oy, n_rows, n_cols, d_n_values_flow, d_start_ind_flow,
d_abs_res_scales, w_flow, w_ar_flow, d_dTR);
}
void normal_eqs_disparity_weighted(
dim3 blockGrid, dim3 threadBlock, float *d_CD,
const float *d_disparity_compact, const float4 *d_Zbuffer_normals_compact,
const int *d_ind_disparity_Zbuffer, float fx, float fy, float ox, float oy,
float b, int n_cols, const int *d_n_values_disparity,
const int *d_start_ind_disparity, const float *d_abs_res_scales,
float w_disp, const float *d_dTR) {
normal_eqs_disparity_weighted_GPU << <blockGrid, threadBlock>>>
(d_CD, d_disparity_compact, d_Zbuffer_normals_compact,
d_ind_disparity_Zbuffer, fx, fy, ox, oy, b, n_cols, d_n_values_disparity,
d_start_ind_disparity, d_abs_res_scales, w_disp, d_dTR);
}
}
#include <kat/on_device/common.cuh>
///@cond
#include <kat/detail/execution_space_specifiers.hpp>
///@endcond
namespace kat {
/**
 * @brief Uniform-naming scheme, templated-when-relevant wrappers of single PTX instructions
*
* @note should contain wrappers for all instructions which are not trivially
* producible with simple C++ code (e.g. no add or subtract)
*/
namespace builtins {
// Arithmetic
// --------------------------------------------
/**
* When multiplying two n-bit numbers, the result may take up to 2n bits.
 * Without upcasting, the value of x * y is the lower n bits of the result;
 * this lets you get the upper bits without performing a 2n-by-2n multiplication.
*/
template <typename I> KAT_FD I multiplication_high_bits(I x, I y);
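// Illustrative sketch only (the actual specializations live in
// detail/builtins.cuh): for 32-bit unsigned operands this is expected to map
// onto the __umulhi intrinsic, e.g.
//
//   template <> KAT_FD unsigned multiplication_high_bits<unsigned>(unsigned x, unsigned y)
//   { return __umulhi(x, y); }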
/**
* Division which becomes faster and less precise than regular "/",
* when --use-fast-math is specified; otherwise it's the same as regular "/".
*/
template <typename F> KAT_FD F divide(F dividend, F divisor);
/**
* @brief clamps the input value to the unit segment [0.0,+1.0].
*
* @note behavior undefined for nan/infinity/etc.
*
* @return max(0.0,min(1.0,x))
*/
template <typename F> KAT_FD F clamp_to_unit_segment(F x);
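// Illustrative sketch only (an assumption, not the library's definition): for
// float this typically reduces to the __saturatef intrinsic, e.g.
//
//   template <> KAT_FD float clamp_to_unit_segment<float>(float x)
//   { return __saturatef(x); }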
template <typename T> KAT_FD T absolute_value(T x)
{
static_assert(std::is_unsigned<T>::value,
"There is no generic implementation of absolute value for signed types, only for a few specific ones");
return x;
}
template <typename T> KAT_FD T minimum(T x, T y) = delete; // don't worry, it's not really deleted for all types
template <typename T> KAT_FD T maximum(T x, T y) = delete; // don't worry, it's not really deleted for all types
/**
 * @brief Computes @p addend + |@p x - @p y|.
*
* See the <a href="https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#integer-arithmetic-instructions-sad">relevant section</a>
* of the PTX ISA reference.
*
* @note The addend and the result are always unsigned, but of the same size as @p x and @p y .
*/
template <typename I>
KAT_FD typename std::make_unsigned<I>::type
sum_with_absolute_difference(I x, I y, typename std::make_unsigned<I>::type addend);
// --------------------------------------------
// Bit and byte manipulation
// --------------------------------------------
template <typename I> KAT_FD int population_count(I x);
template <typename I> KAT_FD I bit_reverse(I x) = delete;
/**
* @brief Find the most-significant, i.e. leading, bit that's different
* from the input's sign bit.
*
* @return for unsigned types, 0-based index of the last 1 bit, starting
* from the LSB towards the MSB; for signed integers it's the same if their
* sign bit (their MSB) is 0, and the index of the last 0 bit if the sign
* bit is 1.
*/
template <typename I> KAT_FD unsigned find_leading_non_sign_bit(I x) = delete;
#if __CUDA_ARCH__ >= 320
template <typename T> KAT_FD T load_global_with_non_coherent_cache(const T* ptr);
#endif
/**
 * @brief Return the number of bits, beginning from the most-significant,
 * which are all 0 ("leading" zeros)
*
* @return The number of leading zeros, between 0 and the size of I in bits.
*/
template <typename I> KAT_FD int count_leading_zeros(I x) = delete;
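// Illustrative sketch only (the actual specializations live in
// detail/builtins.cuh): for 32-bit values this is expected to map onto the
// __clz intrinsic, e.g.
//
//   template <> KAT_FD int count_leading_zeros<int>(int x) { return __clz(x); }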
namespace bit_field {
/**
* Extracts the bits with 0-based indices @p start_pos ... @p start_pos+ @p num_bits - 1, counting
 * from least to most significant, from a bit field. Has sign extension semantics
 * for signed inputs which are a bit tricky; see the PTX ISA guide:
*
* http://docs.nvidia.com/cuda/parallel-thread-execution/index.html
*
* @todo CUB 1.5.2's BFE wrapper seems kind of fishy. Why does Duane Merill not use PTX for extraction from 64-bit fields?
* For now only adopting his implementation for the 32-bit case.
*
 * @note This method is more "strict" in its specialization than others.
*/
template <typename I> KAT_FD I extract_bits(I bit_field, unsigned int start_pos, unsigned int num_bits) = delete;
template <typename I> KAT_FD I replace_bits(I original_bit_field, I bits_to_insert, unsigned int start_pos, unsigned int num_bits) = delete;
} // namespace bit_field
/**
* @brief See: <a href="http://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prmt">relevant section</a>
* of the CUDA PTX reference for an explanation of what this does exactly
*
* @param first a first value from which to potentially use bytes
* @param second a second value from which to potentially use bytes
* @param byte_selectors a packing of 4 selector structures; each selector structure
* is 3 bits specifying which of the input bytes are to be used (as there are 8
* bytes overall in @p first and @p second ), and another bit specifying if it's an
 * actual copy of a byte, or instead whether the sign of the byte (interpreted as
* an int8_t) should be replicated to fill the target byte.
* @return the four bytes of first and/or second, or replicated signs thereof, indicated by the byte selectors
*
 * @note If you don't use the sign-related bits, you could call this function "gather bytes" or "select bytes"
*
*/
KAT_FD unsigned permute_bytes(unsigned first, unsigned second, unsigned byte_selectors);
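// Usage sketch (assuming the __byte_perm-style selector semantics described
// above): selector 0x5410 packs the two low bytes of each input word, yielding
// { first.b0, first.b1, second.b0, second.b1 } from least to most significant:
//
//   unsigned packed = permute_bytes(first, second, 0x5410);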
/**
* Use this to select which variant of the funnel shift intrinsic to use
*/
enum class funnel_shift_amount_resolution_mode_t {
take_lower_bits_of_amount, //!< Shift by shift_amount & (size_in_bits<native_word_t> - 1)
	cap_at_full_word_size, //!< Shift by min(shift_amount, size_in_bits<native_word_t>)
};
/**
* @brief Performs a right-shift on the combination of the two arguments
* into a single, double-the-length, value
*
* @param low_word
* @param high_word
* @param shift_amount The number of bits to right-shift
*
* @tparam AmountResolutionMode shift_amount can have values which are
* higher than the maximum possible number of bits to right-shift; this
* indicates how to interpret such values.
*
* @return the lower bits of the result
*/
template <
funnel_shift_amount_resolution_mode_t AmountResolutionMode =
funnel_shift_amount_resolution_mode_t::cap_at_full_word_size
>
KAT_FD uint32_t funnel_shift_right(
uint32_t low_word,
uint32_t high_word,
uint32_t shift_amount);
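// Usage sketch: right-shifting a 64-bit quantity held as two 32-bit words by
// `s` bits and keeping the low word of the result. An assumption: this wrapper
// is expected to map onto the __funnelshift_r / __funnelshift_rc intrinsics;
// the actual definition lives in detail/builtins.cuh.
//
//   uint32_t lo_shifted = funnel_shift_right(lo, hi, s);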
/**
* @brief Performs a left-shift on the combination of the two arguments
* into a single, double-the-length, value
*
* @param low_word
* @param high_word
* @param shift_amount The number of bits to left-shift
*
* @tparam AmountResolutionMode shift_amount can have values which are
 * higher than the maximum possible number of bits to left-shift; this
* indicates how to interpret such values.
*
* @return the upper bits of the result
*/
template <
funnel_shift_amount_resolution_mode_t AmountResolutionMode =
funnel_shift_amount_resolution_mode_t::cap_at_full_word_size
>
KAT_FD uint32_t funnel_shift_left(
uint32_t low_word,
uint32_t high_word,
uint32_t shift_amount);
// --------------------------------------------
/**
* @brief compute the average of two integer values without needing special
* accounting for overflow - rounding down
*/
template <typename I> I KAT_FD average(I x, I y) = delete; // don't worry, it's not really deleted for all types
/**
* @brief compute the average of two values without needing special
* accounting for overflow - rounding up
*
* @note ignoring type limits, average_rounded_up(x,y) = floor ((x + y + 1 ) / 2)
*/
template <typename I> I KAT_FD average_rounded_up(I x, I y) = delete; // don't worry, it's not really deleted for all types
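// Note added for clarity (sketch, not the library's implementation): for
// unsigned integers the usual overflow-safe formulations are
//
//   average(x, y)            == (x & y) + ((x ^ y) >> 1)   // rounds down
//   average_rounded_up(x, y) == (x | y) - ((x ^ y) >> 1)   // rounds up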
/**
* Special register getter wrappers
*/
namespace special_registers {
KAT_FD unsigned lane_index();
KAT_FD unsigned symmetric_multiprocessor_index();
KAT_FD unsigned long long grid_index();
KAT_FD unsigned int dynamic_shared_memory_size();
KAT_FD unsigned int total_shared_memory_size();
} // namespace special_registers
namespace warp {
#if (__CUDACC_VER_MAJOR__ >= 9)
KAT_FD lane_mask_t ballot (int condition, lane_mask_t lane_mask = full_warp_mask);
KAT_FD int all_lanes_satisfy (int condition, lane_mask_t lane_mask = full_warp_mask);
KAT_FD int any_lanes_satisfy (int condition, lane_mask_t lane_mask = full_warp_mask);
KAT_FD int all_lanes_agree (int condition, lane_mask_t lane_mask = full_warp_mask);
// Note: all_lanes_agree has the same semantics as all_lanes_
#else
KAT_FD lane_mask_t ballot (int condition);
KAT_FD int all_lanes_satisfy (int condition);
KAT_FD int any_lanes_satisfy (int condition);
#endif
#if (__CUDACC_VER_MAJOR__ >= 9)
#if ! defined(__CUDA_ARCH__) or __CUDA_ARCH__ >= 700
template <typename T> KAT_FD lane_mask_t propagate_mask_if_lanes_agree(T value, lane_mask_t lane_mask);
template <typename T> KAT_FD lane_mask_t propagate_mask_if_warp_agrees(T value);
template <typename T> KAT_FD lane_mask_t get_matching_lanes(T value, lane_mask_t lanes = full_warp_mask);
#endif
#endif
namespace mask_of_lanes {
KAT_FD unsigned int preceding();
KAT_FD unsigned int preceding_and_self();
KAT_FD unsigned int self();
KAT_FD unsigned int succeeding_and_self();
KAT_FD unsigned int succeeding();
} // namespace mask_of_lanes
namespace shuffle {
#if (__CUDACC_VER_MAJOR__ < 9)
template <typename T> KAT_FD T arbitrary(T x, int source_lane, int width = warp_size);
template <typename T> KAT_FD T down(T x, unsigned delta, int width = warp_size);
template <typename T> KAT_FD T up(T x, unsigned delta, int width = warp_size);
template <typename T> KAT_FD T xor_(T x, int lane_id_xoring_mask, int width = warp_size);
#else
template <typename T> KAT_FD T arbitrary(T x, int source_lane, int width = warp_size, lane_mask_t participants = full_warp_mask);
template <typename T> KAT_FD T down(T x, unsigned delta, int width = warp_size, lane_mask_t participants = full_warp_mask);
template <typename T> KAT_FD T up(T x, unsigned delta, int width = warp_size, lane_mask_t participants = full_warp_mask);
template <typename T> KAT_FD T xor_(T x, int lane_id_xoring_mask, int width = warp_size, lane_mask_t participants = full_warp_mask);
#endif
// Notes:
// 1. we have to use `xor_` here since `xor` is a reserved word
// 2. Why is lane_mask an `int` when bitmasks typically use unsigned types?
// Because that's how nVIDIA's shuffle-xor signature expects it; probably
// no good reason.
} // namespace shuffle
} // namespace warp
} // namespace builtins
} // namespace kat
#include "detail/builtins.cuh"
#endif // CUDA_KAT_ON_DEVICE_BUILTINS_CUH_
#pragma once
#include <gunrock/app/enactor_base.cuh>
#include <gunrock/app/enactor_iteration.cuh>
#include <gunrock/app/enactor_loop.cuh>
#include <gunrock/oprtr/oprtr.cuh>
#include <gunrock/app/snn/snn_problem.cuh>
#include <gunrock/app/snn/snn_helpers.cuh>
#include <gunrock/util/scan_device.cuh>
#include <gunrock/util/sort_device.cuh>
#include <gunrock/oprtr/1D_oprtr/for.cuh>
//#include <utility>
// KNN app
#include <gunrock/app/knn/knn_enactor.cuh>
#include <gunrock/app/knn/knn_test.cuh>
//#define SNN_ASSERT 1
//#define SNN_DEBUG 1
#ifdef SNN_DEBUG
#define debug(a...) printf(a)
#else
#define debug(a...)
#endif
namespace gunrock {
namespace app {
namespace snn {
/**
 * @brief Specifying parameters for snn Enactor
 * @param parameters The util::Parameter<...> structure holding all parameter
 * info
 * \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_enactor(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(app::UseParameters_enactor(parameters));
return retval;
}
/**
 * @brief definition of snn iteration loop
* @tparam EnactorT Type of enactor
*/
template <typename EnactorT>
struct snnIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push> {
typedef typename EnactorT::VertexT VertexT;
typedef typename EnactorT::SizeT SizeT;
typedef typename EnactorT::ValueT ValueT;
typedef typename EnactorT::Problem::GraphT::CsrT CsrT;
typedef typename EnactorT::Problem::GraphT::GpT GpT;
typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop;
snnIterationLoop() : BaseIterationLoop() {}
/**
* @brief Core computation of knn, one iteration
* @param[in] peer_ Which GPU peers to work on, 0 means local
* \return cudaError_t error message(s), if any
*/
cudaError_t Core(int peer_ = 0) {
// --
// Alias variables
auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
auto &enactor_slice =
this->enactor
->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];
auto &enactor_stats = enactor_slice.enactor_stats;
auto &oprtr_parameters = enactor_slice.oprtr_parameters;
auto &retval = enactor_stats.retval;
auto num_points = data_slice.num_points;
// K-Nearest Neighbors data
auto &knns = data_slice.knns;
// Number of KNNs
auto k = data_slice.k;
// Parameter of density
auto eps = data_slice.eps;
// Parameter of core point
auto min_pts = data_slice.min_pts;
// Shared Nearest Neighbors
auto &snn_density = data_slice.snn_density;
auto &cluster_id = data_slice.cluster_id;
auto &core_points = data_slice.core_points;
auto &core_points_counter = data_slice.core_points_counter;
auto &flag = data_slice.flag;
auto &core_point_mark_0 = data_slice.core_point_mark_0;
auto &core_point_mark = data_slice.core_point_mark;
auto &visited = data_slice.visited;
auto &noise_points = data_slice.noise_points;
// CUB Related storage
auto &cub_temp_storage = data_slice.cub_temp_storage;
auto &offsets = data_slice.offsets;
auto &knns_sorted = data_slice.knns_out;
cudaStream_t stream = oprtr_parameters.stream;
auto target = util::DEVICE;
//util::Array1D<SizeT, VertexT> *null_frontier = NULL;
oprtr_parameters.advance_mode = "ALL_EDGES";
#ifdef SNN_ASSERT
GUARD_CU(knns.ForAll(
[num_points, k, noise_points] __host__ __device__(SizeT * knns_,
const SizeT &pos) {
for (int i = 0; i < num_points; ++i) {
for (int j = 0; j < k; ++j) {
assert(knns_[i * k + j] != i);
}
}
},
1, target, stream));
#endif
#ifdef SNN_DEBUG
// DEBUG ONLY
GUARD_CU(knns.ForAll(
[num_points, k] __host__ __device__(SizeT * knns_, const SizeT &pos) {
debug("[knn_enactor] knn:\n");
for (int i = 0; i < num_points; ++i) {
debug("knn[%d]: ", i);
for (int j = 0; j < k; ++j) {
debug("%d ", knns_[i * k + j]);
}
debug("\n");
}
},
1, target, stream));
#endif
// Sort all the knns using CUB
GUARD_CU(util::SegmentedSort(knns, knns_sorted, num_points*k,
num_points, offsets, /* int begin_bit = */ 0,
/* int end_bit = */ sizeof(SizeT) * 8,
stream));
    // Do not remove cudaDeviceSynchronize; CUB is running on a different stream and device synchronization is required
// GUARD_CU2(cudaStreamSynchronize(stream), "cudaDeviceSynchronize failed.");
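    // Note (assumption, added for clarity): `offsets` comes from the problem's
    // data slice and is presumably the usual CUB-style segment-boundary array
    // with offsets[i] = i * k, so each point's k neighbour indices form one
    // independently sorted segment.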
#ifdef SNN_DEBUG
GUARD_CU(knns_sorted.ForAll(
[num_points, k] __host__ __device__(SizeT * knns_, const SizeT &pos) {
auto i = pos / k;
auto j = pos % k;
assert(knns_[i * k + j] != i);
},
num_points * k, target, stream));
#endif
#ifdef SNN_DEBUG
// DEBUG ONLY
GUARD_CU(knns_sorted.ForAll(
[num_points, k] __host__ __device__(SizeT * knns_, const SizeT &pos) {
debug("[knn_enactor] knn:\n");
for (int i = 0; i < num_points; ++i) {
debug("knn[%d]: ", i);
for (int j = 0; j < k; ++j) {
debug("%d ", knns_[i * k + j]);
}
debug("\n");
}
},
1, target, stream));
#endif
    // Fill out knns unsorted array with InvalidValues - needed to mark SNN
GUARD_CU(knns.ForAll(
[] __host__ __device__(SizeT * knns_, const SizeT &pos) {
knns_[pos] = util::PreDefinedValues<SizeT>::InvalidValue;
},
num_points * k, target, stream));
// Find candidates for SNN
auto SNNcandidates_op = [k, knns_sorted]
__host__ __device__(SizeT* knns_, const SizeT &pos) {
auto x = pos / k;
auto q = knns_sorted[x * k + (pos % k)];
#pragma unroll // all iterations are independent
for (int i = 0; i < k; ++i) {
if (knns_sorted[q * k + i] == x) {
knns_[x * k + (pos%k)] = i;
break;
}
}
};
// Find density of each point
GUARD_CU(knns.ForAll(SNNcandidates_op, num_points * k, target, stream));
// SNN density of each point
auto density_op = [num_points, k, eps, min_pts, knns_sorted,
snn_density, visited]
__host__ __device__(SizeT * knns_, const SizeT &pos) {
// for (int pos = 0; pos < k*num_points; ++pos){// //uncomment for debug
auto x = pos / k;
auto q = knns_sorted[x * k + (pos % k)];
auto snn_candidate = knns_[x * k + (pos % k)];
if (!util::isValid(snn_candidate))
return;
// SNN candidate exists
// Checking SNN similarity
// knns are sorted, counting intersection of knns[x] and knns[q]
auto similarity = SNNsimilarity(x, q, knns_sorted, eps, k);
//printf("similarity of %d and %d is %d, what about eps %d\n", x, q, similarity, eps);
if (similarity > eps) {
// x and q are SNN
atomicAdd(&snn_density[x], 1);
visited[x] = 1;
}else{
similarity = util::PreDefinedValues<SizeT>::InvalidValue;
}
knns_[x * k + (pos % k)] = similarity;
// } //uncomment for debug
};
// Find density of each point
GUARD_CU(knns.ForAll(density_op, num_points*k, target, stream));
// GUARD_CU(frontier.V_Q()->ForAll(density_op, 1, target, stream)); //uncomment for debug
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed.");
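    // Sketch (assumption; the actual SNNsimilarity lives in snn_helpers.cuh):
    // with both k-NN lists sorted, the shared-neighbor count used above is a
    // linear, merge-style intersection of the two sorted segments:
    //
    //   int sim = 0;
    //   for (int a = 0, b = 0; a < k && b < k; ) {
    //     if      (knns_sorted[x * k + a] < knns_sorted[q * k + b]) ++a;
    //     else if (knns_sorted[x * k + a] > knns_sorted[q * k + b]) ++b;
    //     else { ++sim; ++a; ++b; }
    //   }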
#ifdef SNN_DEBUG
// DEBUG ONLY: write down densities:
GUARD_CU(snn_density.ForAll(
[num_points, k] __host__ __device__(SizeT * sd, const SizeT &pos) {
debug("snn densities: \n");
for (int i = 0; i < num_points; ++i) {
debug("density[%d] = %d\n", i, sd[i]);
}
},
1, target, stream));
#endif
// Mark core points, initialize clusters
GUARD_CU(core_point_mark_0.ForAll(
[snn_density, min_pts, cluster_id, visited] __host__ __device__(
SizeT * cp, const SizeT &pos) {
if (visited[pos] && snn_density[pos] >= min_pts) {
cp[pos] = 1;
cluster_id[pos] = pos;
}
},
num_points, target, stream));
GUARD_CU(util::cubInclusiveSum(cub_temp_storage, core_point_mark_0,
core_point_mark, num_points, stream));
    // Do not remove cudaDeviceSynchronize; CUB is running on a different stream and device synchronization is required
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed.");
GUARD_CU(core_points.ForAll(
[num_points, core_point_mark, core_points_counter, visited, snn_density, min_pts]
__host__ __device__(SizeT * cps, const SizeT &pos) {
if (visited[pos] && snn_density[pos] >= min_pts) {
cps[core_point_mark[pos] - 1] = pos;
}
if (pos == num_points - 1)
core_points_counter[0] = core_point_mark[pos];
},
num_points, target, stream));
GUARD_CU(core_points_counter.Move(util::DEVICE, util::HOST, 1, 0, stream));
// Do not remove, needed by Move
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed");
printf("GPU number of core points found: %d\n", core_points_counter[0]);
#ifdef SNN_DEBUG
// DEBUG ONLY: write down core points
GUARD_CU(core_point_mark_0.ForAll(
[core_points_counter, core_points] __host__ __device__(
SizeT * cp, const SizeT &pos) {
SizeT cpc = core_points_counter[0];
          debug("core points: \n");
for (int i = 0; i < cpc; ++i) {
debug("%d ", core_points[i]);
}
debug("\n");
},
1, target, stream));
#endif
// For each x - core point, remove neighbors q which are:
// not core points, q >= x, not shared nearest neighbors
GUARD_CU(knns_sorted.ForAll(
[core_points, k, visited, snn_density, min_pts, knns] __host__ __device__(
SizeT * knns_, const SizeT &pos) {
int x = core_points[pos/k];
auto q = knns_[x * k + (pos%k)];
if (!visited[q] || snn_density[q] < min_pts ||
q >= x || !util::isValid(knns[x * k + (pos%k)])) {
knns_[x * k + (pos%k)] = util::PreDefinedValues<SizeT>::InvalidValue;
knns[x * k + (pos%k)] = util::PreDefinedValues<SizeT>::InvalidValue;
}
},
core_points_counter[0]*k, target, stream));
/*
//DO NOT REMOVE - this part is rewriting neighbors which are in use to the front of array
GUARD_CU(knns_sorted.ForAll(
[core_points, k, visited, snn_density, min_pts, knns] __host__ __device__(
SizeT * knns_, const SizeT &pos) {
// only for core points
int x = core_points[pos];
int last = x * k;
for (int i = 0; i < k; ++i){
auto q = knns_[x * k + i];
if (util::isValid(q)){
knns_[last] = q;
knns[last] = knns[x * k + i];
++last;
}
}
for (; last < x * k + k; ++last) {
knns_[last] = util::PreDefinedValues<SizeT>::InvalidValue;
knns[last] = util::PreDefinedValues<SizeT>::InvalidValue;
}
},
core_points_counter[0], target, stream));
*/
// Core points merging
    // At the beginning, cluster_id[x] = x for each core point x
for (int iter = 0; iter < k; ++iter) {
// Build trees for i-th iteration
auto build_trees_op =
[iter, k, core_points, knns_sorted] __host__ __device__(
SizeT * cluster, const SizeT &pos) {
auto x = core_points[pos];
// q < x
auto q = knns_sorted[x * k + iter];
if (!util::isValid(q)) return;
auto cluster_q = Load<cub::LOAD_CG>(cluster + q);
auto cluster_x = Load<cub::LOAD_CG>(cluster + x);
if (cluster_q == cluster_x){
knns_sorted[x * k + iter] = util::PreDefinedValues<SizeT>::InvalidValue;
return;
}
if (cluster_x == x){
// only x is going to change cluster[x]
cluster[x] = cluster_q;
knns_sorted[x * k + iter] = util::PreDefinedValues<SizeT>::InvalidValue;
}
};
// Building cluster_id tree
GUARD_CU(cluster_id.ForAll(build_trees_op, core_points_counter[0], target,
stream));
// Reduction trees to stars
auto reduce_op = [cluster_id, core_points]
__host__ __device__(const int &cos, const SizeT &pos) {
auto x = core_points[pos];
auto cluster_x = Load<cub::LOAD_CG>(cluster_id + x);
auto cluster_cluster_x = Load<cub::LOAD_CG>(cluster_id + cluster_x);
cluster_id[x] = cluster_cluster_x;
};
// Reduce trees to stars
SizeT loop_size = core_points_counter[0];
SizeT num_repeats = log2(core_points_counter[0]);
gunrock::oprtr::RepeatFor(
reduce_op, num_repeats, loop_size, util::DEVICE, stream,
util::PreDefinedValues<int>::InvalidValue, // grid_size
util::PreDefinedValues<int>::InvalidValue, // block_size
2);
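      // Note added for clarity (pointer-jumping sketch): each repeat applies
      //   cluster_id[x] = cluster_id[cluster_id[x]];
      // so after roughly log2(#core points) rounds every core point links
      // directly to its tree's root and the parent chains collapse to stars.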
// Zero-waste, core_point_mark_0 used again to mark current pairs to merge
auto &pairs_to_merge = core_point_mark_0;
GUARD_CU(pairs_to_merge.ForAll(
[core_points] __host__ __device__ (SizeT* c, const SizeT &pos){
auto x = core_points[pos];
c[x] = util::PreDefinedValues<SizeT>::InvalidValue;
}, core_points_counter[0], target, stream));
// Mark to merge
auto mark_to_merge_op =
[k, cluster_id, pairs_to_merge, iter, knns_sorted, flag, core_points]
//__host__ __device__(const int &cos, const SizeT &pos) {
__host__ __device__(SizeT * c_p, const SizeT &pos) {
// x core point
auto x = core_points[pos];
// q < x
auto q = knns_sorted[x * k + iter];
if (!util::isValid(q)) return;
auto cluster_q = cluster_id[q];
while (cluster_id[cluster_q] != cluster_q) cluster_q = cluster_id[cluster_q];
auto cluster_x = cluster_id[x];
while (cluster_id[cluster_x] != cluster_x) cluster_x = cluster_id[cluster_x];
if (cluster_x == cluster_q){
knns_sorted[x * k + iter] = util::PreDefinedValues<SizeT>::InvalidValue;
}else if (cluster_x > cluster_q){
// pairs_to_merge[cluster_x] = cluster_q
auto old = atomicCAS(pairs_to_merge + cluster_x, util::PreDefinedValues<SizeT>::InvalidValue, cluster_q);
if (!util::isValid(old)){
            // Done! it is going to happen
knns_sorted[x * k + iter] = util::PreDefinedValues<SizeT>::InvalidValue;
flag[0] = 1;
}
}else{
// pairs_to_merge[cluster_q] = cluster_x
auto old = atomicCAS(pairs_to_merge + cluster_q, util::PreDefinedValues<SizeT>::InvalidValue, cluster_x);
if (!util::isValid(old)){
            // Done! it is going to happen
knns_sorted[x * k + iter] = util::PreDefinedValues<SizeT>::InvalidValue;
flag[0] = 1;
}
}
};
// Merge
auto merge_op =
[cluster_id, pairs_to_merge, flag, core_points]
__host__ __device__(SizeT * c_p, const SizeT &pos) {
//__host__ __device__(const int &cos, const SizeT &pos) {
auto x = core_points[pos];
auto q = pairs_to_merge[x];
if (!util::isValid(q)) return;
pairs_to_merge[x] = util::PreDefinedValues<SizeT>::InvalidValue;
// Only x is going to change cluster[x], so no atomic needed
cluster_id[x] = Load<cub::LOAD_CG>(cluster_id + q);
};
/* int max_num_repeats = (int)core_points_counter[0];
gunrock::oprtr::DoubleWhile(
mark_to_merge_op, merge_op, flag, loop_size, util::DEVICE,
max_num_repeats,
stream,
util::PreDefinedValues<int>::InvalidValue, // grid_size
util::PreDefinedValues<int>::InvalidValue, // block_size
0);*/
// TO DO increase load balance
// - pairs can be hashed into another array
// Merging conflicted pairs, # pairs < # core points
// Reduce trees to stars
for (int j=0; j<core_points_counter[0]; ++j){
GUARD_CU(flag.ForAll(
[] __host__ __device__ (SizeT* f, const SizeT &pos){
f[pos] = 0;
}, 1, target, stream));
// Mark to merge
GUARD_CU(core_points.ForAll(mark_to_merge_op, core_points_counter[0], target,
stream));
// Merge
GUARD_CU(core_points.ForAll(merge_op, core_points_counter[0], target,
stream));
GUARD_CU(flag.Move(util::DEVICE, util::HOST, 1, 0, stream));
// Do not remove, needed by Move
GUARD_CU2(cudaStreamSynchronize(stream), "cudaStreamSynchronize failed");
if (flag[0] == 0)
break;
}//iteration over number of core points
}//iteration over k nearest neighbors
#ifdef SNN_DEBUG
    // DEBUG ONLY: print cluster ids after merging core points:
GUARD_CU(cluster_id.ForAll(
[num_points, k] __host__ __device__(SizeT * c_id, const SizeT &pos) {
debug("clusters after merging core points: \n");
for (int i = 0; i < num_points; ++i) {
debug("cluster[%d] = %d\n", i, c_id[i]);
}
},
1, target, stream));
#endif
debug("gpu noise points: ");
// Assign other non-core and non-noise points to clusters
auto clustering_op =
[core_point_mark, eps, k, cluster_id, min_pts, knns_sorted,
noise_points, knns, visited, snn_density] __host__
__device__(SizeT * v_q, const SizeT &src) {
// only non-core points
if (visited[src] && snn_density[src] >= min_pts) return;
SizeT counterMax = 0;
SizeT max_y = util::PreDefinedValues<SizeT>::InvalidValue;
for (int i = 0; i < k; ++i) {
SizeT y = knns_sorted[src * k + i];
if (visited[y] && snn_density[y] >= min_pts) {
SizeT SNNsm = knns[src * k + i];
if (util::isValid(SNNsm) && SNNsm > counterMax) {
counterMax = SNNsm;
max_y = y;
}
}
}
// only non-noise points
if (util::isValid(max_y)) {
cluster_id[src] = cluster_id[max_y];
} else {
cluster_id[src] = util::PreDefinedValues<SizeT>::InvalidValue;
atomicAdd(&noise_points[0], 1);
debug("%d ", src);
}
};
debug("\n");
// Assign other non-core and non-noise points to clusters
// GUARD_CU(core_points.ForAll(clustering_op, num_points, target, stream));
GUARD_CU(core_points.ForAll(clustering_op, num_points, target, stream));
#ifdef SNN_DEBUG
    // DEBUG ONLY: print cluster ids after assigning non-core points:
GUARD_CU(cluster_id.ForAll(
[num_points, k] __host__ __device__(SizeT * c_id, const SizeT &pos) {
debug("clusters after adding non core points: \n");
for (int i = 0; i < num_points; ++i) {
debug("cluster[%d] = %d\n", i, c_id[i]);
}
},
1, target, stream));
#endif
return retval;
}
/**
* @brief Routine to combine received data and local data
   * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each
   * transmission item, typed VertexT
   * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each
   * transmission item, typed ValueT
   * @param received_length The number of transmission items received
* @param[in] peer_ which peer GPU the data came from
* \return cudaError_t error message(s), if any
*/
template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES>
cudaError_t ExpandIncoming(SizeT &received_length, int peer_) {
// ================ INCOMPLETE TEMPLATE - MULTIGPU ====================
auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
auto &enactor_slice =
this->enactor
->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];
// auto iteration = enactor_slice.enactor_stats.iteration;
auto expand_op = [
// TODO: pass data used by the lambda, e.g.:
] __host__ __device__(VertexT & key, const SizeT &in_pos,
VertexT *vertex_associate_ins,
ValueT *value__associate_ins) -> bool {
return true;
};
cudaError_t retval =
BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES,
NUM_VALUE__ASSOCIATES>(
received_length, peer_, expand_op);
return retval;
}
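  // Single-pass algorithm: stop as soon as the first outer iteration has run.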
bool Stop_Condition(int gpu_num = 0) {
auto it = this->enactor->enactor_slices[0].enactor_stats.iteration;
if (it > 0)
return true;
else
return false;
}
}; // end of snnIteration
/**
* @brief snn enactor class.
* @tparam _Problem Problem type we process on
* @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor
* @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor
*/
template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE,
unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault>
class Enactor
: public EnactorBase<
typename _Problem::GraphT, typename _Problem::GraphT::VertexT,
typename _Problem::GraphT::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> {
public:
typedef _Problem Problem;
typedef typename Problem::SizeT SizeT;
typedef typename Problem::VertexT VertexT;
typedef typename Problem::GraphT GraphT;
typedef typename GraphT::VertexT LabelT;
typedef typename GraphT::ValueT ValueT;
typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag>
BaseEnactor;
typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT;
typedef snnIterationLoop<EnactorT> IterationT;
Problem *problem;
IterationT *iterations;
/**
* @brief snn constructor
*/
Enactor() : BaseEnactor("SNN"), problem(NULL) {
this->max_num_vertex_associates = 0;
this->max_num_value__associates = 1;
}
/**
* @brief snn destructor
*/
virtual ~Enactor() { /*Release();*/
}
/*
* @brief Releasing allocated memory space
* @param target The location to release memory from
* \return cudaError_t error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseEnactor::Release(target));
delete[] iterations;
iterations = NULL;
problem = NULL;
return retval;
}
/**
* @brief Initialize the problem.
* @param[in] problem The problem object.
* @param[in] target Target location of data
* \return cudaError_t error message(s), if any
*/
cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
this->problem = &problem;
//SizeT num_points = problem.num_points;
// Lazy initialization
GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 2, NULL, target, false));
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0];
auto &graph = problem.sub_graphs[gpu];
// GUARD_CU(enactor_slice.frontier.Allocate(1, 1,
// this->queue_factors));
GUARD_CU(enactor_slice.frontier.Allocate(1, 1, this->queue_factors));
// num_points, num_points * num_points, this->queue_factors));
}
iterations = new IterationT[this->num_gpus];
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
GUARD_CU(iterations[gpu].Init(this, gpu));
}
GUARD_CU(this->Init_Threads(
this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>)));
return retval;
}
/**
* @brief one run of snn, to be called within GunrockThread
* @param thread_data Data for the CPU thread
* \return cudaError_t error message(s), if any
*/
cudaError_t Run(ThreadSlice &thread_data) {
gunrock::app::Iteration_Loop<
// change to how many {VertexT, ValueT} data need to communicate
// per element in the inter-GPU sub-frontiers
0, 1, IterationT>(thread_data, iterations[thread_data.thread_num]);
return cudaSuccess;
}
/**
* @brief Reset enactor
...
* @param[in] target Target location of data
* \return cudaError_t error message(s), if any
*/
cudaError_t Reset(util::Location target = util::DEVICE) {
typedef typename GraphT::GpT GpT;
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseEnactor::Reset(target));
SizeT num_points = this->problem->data_slices[0][0].num_points;
// this->problem->data_slices[0][0].sub_graph[0].num_points;
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
if (this->num_gpus == 1) {
this->thread_slices[gpu].init_size = num_points;
for (int peer_ = 0; peer_ < this->num_gpus; peer_++) {
auto &frontier =
this->enactor_slices[gpu * this->num_gpus + peer_].frontier;
frontier.queue_length = (peer_ == 0) ? 1 : 0;//num_points : 0;
if (peer_ == 0) {
util::Array1D<SizeT, VertexT> tmp;
tmp.Allocate(num_points, target | util::HOST);
for (SizeT i = 0; i < 1; ++i) {
tmp[i] = (VertexT)i % num_points;
}
GUARD_CU(tmp.Move(util::HOST, target));
GUARD_CU(frontier.V_Q()->ForEach(
tmp,
[] __host__ __device__(VertexT & v, VertexT & i) { v = i; },
1, target, 0));
// num_points, target, 0));
tmp.Release();
}
}
} else {
// MULTIGPU INCOMPLETE
}
}
GUARD_CU(BaseEnactor::Sync());
return retval;
}
/**
* @brief Enacts a snn computing on the specified graph.
...
* \return cudaError_t error message(s), if any
*/
cudaError_t Enact() {
cudaError_t retval = cudaSuccess;
GUARD_CU(this->Run_Threads(this));
util::PrintMsg("GPU SNN Done.", this->flag & Debug);
return retval;
}
};
} // namespace snn
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
#include "DeviceSymbolCopy.h"
#include "LayoutTranslator.h"
#include "SignalProcessingFitterQueue.h"
#include "JobWrapper.h"
#define TRANSLATE_DEBUG_OUTPUT 0
////////////////////////////////////
//Translator Functions
//Translate the FgBuffer into the Image cube
void TranslatorsFlowByFlow::TranslateFgBuffer_RegionToCube( LayoutCubeWithRegions<short> & ImageCube,
size_t numLBeads,
size_t numFrames,
size_t flowsPerBLock,
FG_BUFFER_TYPE *fgPtr,
BeadParams * bP,
size_t regId)
{
size_t x,y;
//ImgRegParams ImageParams = ImageCube.getParams();
// ImageCube.SetValueRegion(0,regId);
ImageCube.setRWStrideZ();
for(size_t idx = 0; idx < numLBeads; idx++ ){
x = bP->x;
y = bP->y;
ImageCube.setRWPtrRegion(regId,x,y);
FG_BUFFER_TYPE * fgPtrFrames = fgPtr;
for(size_t f = 0; f < numFrames; f++)
{
ImageCube.write(*fgPtrFrames);
fgPtrFrames++;
}
// move to next bead
fgPtr += numFrames*flowsPerBLock;
bP++;
}
}
void TranslatorsFlowByFlow::TranslateFgBuffer_CubeToRegion( LayoutCubeWithRegions<short> & ImageCube,
size_t numLBeads,
size_t numFrames,
size_t flowsPerBLock,
FG_BUFFER_TYPE *fgPtr,
BeadParams * bP,
size_t regId)
{
size_t x,y;
//ImgRegParams ImageParams = ImageCube.getParams();
ImageCube.setRWStrideZ();
for(size_t idx = 0; idx < numLBeads; idx++ ){
x = bP->x;
y = bP->y;
ImageCube.setRWPtrRegion(regId,x,y);
FG_BUFFER_TYPE * fgPtrFrames = fgPtr;
for(size_t f = 0; f < numFrames; f++)
{
*fgPtrFrames = ImageCube.read();
fgPtrFrames++;
}
// move to next bead
fgPtr += numFrames*flowsPerBLock;
bP++;
}
}
//Translate BeadParam struct into BeadParam Cube
void TranslatorsFlowByFlow::TranslateBeadParams_RegionToCube(LayoutCubeWithRegions<float> & BeadParamCube, void* bkinfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
BeadParams * bP = myJob.getBeadParams();
int numLBeads = myJob.getNumBeads();
BeadParamCube.setRWStrideZ();
assert(BeadParamCube.getDimZ() >= Bp_NUM_PARAMS);
for(int b = 0; b < numLBeads; b++){
BeadParamCube.setRWPtrRegion(regId,bP->x,bP->y);
BeadParamCube.write(bP->Copies);
BeadParamCube.write(bP->R);
BeadParamCube.write(bP->dmult);
BeadParamCube.write(bP->gain);
BeadParamCube.write(bP->tau_adj);
BeadParamCube.write(bP->phi);
for(int p = 0; p < NUM_DM_PCA; p++) BeadParamCube.write(bP->pca_vals[p]);
bP++;
}
}
void TranslatorsFlowByFlow::TranslateBeadParams_CubeToRegion(LayoutCubeWithRegions<float> & BeadParamCube, size_t numLBeads, BeadParams * bP, size_t regId)
{
BeadParamCube.setRWStrideZ();
assert(BeadParamCube.getDimZ() >= Bp_NUM_PARAMS);
for(size_t b = 0; b < numLBeads; b++){
BeadParamCube.setRWPtrRegion(regId,bP->x,bP->y);
bP->Copies = BeadParamCube.read();
bP->R = BeadParamCube.read();
bP->dmult = BeadParamCube.read();
bP->gain = BeadParamCube.read();
bP->tau_adj = BeadParamCube.read();
bP->phi = BeadParamCube.read();
for(int p = 0; p < NUM_DM_PCA; p++) bP->pca_vals[p] = BeadParamCube.read();
bP++;
}
}
//Translate Bead State into Bead State Cube
void TranslatorsFlowByFlow::TranslatePolyClonal_RegionToCube(LayoutCubeWithRegions<float> & PolyClonalCube, void* bkinfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
BeadParams * bP = myJob.getBeadParams();
int numLBeads = myJob.getNumBeads();
PolyClonalCube.setRWStrideZ();
assert(PolyClonalCube.getDimZ() >= Poly_NUM_PARAMS);
for(int b = 0; b < numLBeads; b++){
PolyClonalCube.setRWPtrRegion(regId,bP->x,bP->y);
PolyClonalCube.write(bP->my_state->ppf);
PolyClonalCube.write(bP->my_state->ssq);
PolyClonalCube.write(bP->my_state->key_norm);
bP++;
}
}
//Translate Bead State flags into BeadState Mask
void TranslatorsFlowByFlow::TranslateBeadStateMask_RegionToCube( LayoutCubeWithRegions<unsigned short> & BkgModelMask, void* bkinfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
BeadParams * bP = myJob.getBeadParams();
int numLBeads = myJob.getNumBeads();
for(int b = 0; b < numLBeads; b++){
bead_state * Bs = bP->my_state;
unsigned short maskValue = 0;
maskValue |= (Bs->bad_read)?(BkgMaskBadRead):(0);
maskValue |= (!Bs->clonal_read)?(BkgMaskPolyClonal):(0);
maskValue |= (Bs->corrupt)?(BkgMaskCorrupt):(0);
maskValue |= (Bs->pinned)?(BkgMaskPinned):(0);
maskValue |= (Bs->random_samp)?(BkgMaskRandomSample):(0);
maskValue |= (info->bkgObj->region_data->my_beads.sampled[b])?(BkgMaskRegionalSampled):(0);
maskValue |= (info->bkgObj->region_data->my_beads.high_quality[b])?(BkgMaskHighQaulity):(0);
BkgModelMask.putAtReg(maskValue, regId, bP->x, bP->y);
bP++;
}
}
void TranslatorsFlowByFlow::TranslateBeadStateMask_CubeToRegion( LayoutCubeWithRegions<unsigned short> & BkgModelMask, void* bkinfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
BeadParams * bP = myJob.getBeadParams();
int numLBeads = myJob.getNumBeads();
for(int b = 0; b < numLBeads; b++){
unsigned short maskValue = 0;
maskValue = BkgModelMask.getAtReg(regId, bP->x, bP->y);
    // So far only the Corrupt flag might get updated in the kernel
// bP->my_state->bad_read = (maskValue & BkgMaskBadRead);
bP->my_state->corrupt = (maskValue & BkgMaskCorrupt);
    bP->my_state->clonal_read = (!(maskValue & BkgMaskPolyClonal));
    // move to next bead
    bP++;
  }
}
//translate Results from beadParams to Result Cube
void TranslatorsFlowByFlow::TranslateResults_RegionToCube(LayoutCubeWithRegions<float> & ResultCube, size_t numLBeads, size_t flowIdxInBlock, BeadParams * bP, size_t regId){
ResultCube.setRWStrideZ();
assert(ResultCube.getDimZ() >= Result_NUM_PARAMS);
for(size_t b = 0; b < numLBeads; b++){
ResultCube.setRWPtrRegion(regId,bP->x,bP->y,ResultAmpl);
ResultCube.write(bP->Ampl[flowIdxInBlock]);
ResultCube.write(bP->kmult[flowIdxInBlock]);
ResultCube.write(bP->my_state->avg_err);
bP++;
}
}
void TranslatorsFlowByFlow::TranslateResults_CubeToRegion(LayoutCubeWithRegions<float> & ResultCube, void * bkinfo, size_t flowIdxInBlock, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
size_t numLBeads = myJob.getNumBeads();
BeadParams *bP = myJob.getBeadParams();
ResultCube.setRWStrideZ();
assert(ResultCube.getDimZ() == Result_NUM_PARAMS);
for(size_t b = 0; b < numLBeads; b++){
//ResultCube.setRWPtrRegion(regId,bP->x,bP->y,ResultAmpl);
bP->Ampl[flowIdxInBlock] = ResultCube.getAtReg(regId,bP->x,bP->y,ResultAmpl); // TODO: remove this plane and use AMPL
bP->kmult[flowIdxInBlock] = ResultCube.getAtReg(regId,bP->x,bP->y,ResultKmult);
bP->my_state->avg_err = ResultCube.getAtReg(regId,bP->x,bP->y,ResultAvgErr);
bP++;
}
}
void TranslatorsFlowByFlow::TranslateRegionParams_CubeToRegion(LayoutCubeWithRegions<reg_params> & RegionCube, reg_params * rP, size_t regId)
{
*rP = RegionCube.getAtReg(regId);
}
void TranslatorsFlowByFlow::TranslateRegionParams_RegionToCube( LayoutCubeWithRegions<reg_params> & RegionCube, void* bkinfo,
size_t regId)
{
WorkSet myJob((BkgModelWorkInfo*)bkinfo);
RegionCube[regId] = *(myJob.getRegionParams());
}
void TranslatorsFlowByFlow::TranslateRegionFrameCube_RegionToCube( LayoutCubeWithRegions<float> & RegionFrameCube, void * bkinfo,
size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
size_t numFrames = myJob.getNumFrames();
//frames by region by param cuda
RegionFrameCube.setRWStrideX();
//DeltaFrames
float * ptr = myJob.getDeltaFrames();
RegionFrameCube.setRWPtrRegion(regId,0,0,RfDeltaFrames);
RegionFrameCube.writeByStride(ptr,numFrames);
ptr = myJob.getFrameNumber();
RegionFrameCube.setRWPtrRegion(regId,0,0,RfFrameNumber);
RegionFrameCube.writeByStride(ptr,numFrames);
ptr = myJob.getDarkMatter();
for(int vec = 0; vec < 4; vec++){
//RegionFrameCube.setRWPtr(0,regId,RfDarkMatter0+vec);
RegionFrameCube.setRWPtrRegion(regId,0,0,RfDarkMatter0+vec);
RegionFrameCube.writeByStride(ptr,numFrames);
ptr += numFrames; //shift to next PCA vector
}
}
void TranslatorsFlowByFlow::TranslateRegionFramesPerPoint_RegionToCube( LayoutCubeWithRegions<int> & RegionFramesPerPoint, void * bkinfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
size_t numFrames = myJob.getNumFrames();
int *Fpp = NULL;
if (myJob.performExpTailFitting())
Fpp = myJob.GetETFFramesPerPoint();
else
Fpp = myJob.GetStdFramesPerPoint();
//frames by region by param cuda
RegionFramesPerPoint.setRWStrideX();
RegionFramesPerPoint.setRWPtrRegion(regId);
RegionFramesPerPoint.writeByStride(Fpp,numFrames);
}
void TranslatorsFlowByFlow::TranslateEmphasis_RegionToCube(LayoutCubeWithRegions<float> & RegionEmphasis, void * bkinfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
float * ptr =myJob.getEmphVec();
int numFrames = myJob.getNumFrames();
RegionEmphasis.setRWPtrRegion(regId);
RegionEmphasis.setRWStrideX();
for(int f=0; f< MAX_POISSON_TABLE_COL* numFrames ; f++)
{
RegionEmphasis.write(ptr[f]);
}
}
void TranslatorsFlowByFlow::TranslateNonZeroEmphasisFrames_RegionToCube(LayoutCubeWithRegions<int> & RegionNonZeroEmphFrames, void * bkinfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
RegionNonZeroEmphFrames.setRWStrideX();
RegionNonZeroEmphFrames.setRWPtrRegion(regId,0,0,NzEmphFrames);
int * ptr = myJob.GetNonZeroEmphasisFrames();
for(int i = 0; i < MAX_POISSON_TABLE_COL; i++){
RegionNonZeroEmphFrames.write(ptr[i]);
}
}
void TranslatorsFlowByFlow::TranslateNucRise_RegionToCube(LayoutCubeWithRegions<float> & NucRise, void *bkinfo, size_t flowIdx, size_t regId)
{
//float * nucRise, size_t numFrames, size_t regId)
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
int numFrames = myJob.getNumFrames();
float * ptr = info->bkgObj->region_data->my_regions.cache_step.nuc_rise_fine_step;
  ptr += flowIdx*numFrames*ISIG_SUB_STEPS_SINGLE_FLOW;
NucRise.setRWPtrRegion(regId);
NucRise.setRWStrideX();
for(int f=0; f< ISIG_SUB_STEPS_SINGLE_FLOW*numFrames; f++)
{
NucRise.write(ptr[f]);
}
}
void TranslatorsFlowByFlow::TranslatePerFlowRegionParams_RegionToCube(LayoutCubeWithRegions<PerFlowParamsRegion> & PerFlowParamReg, void * bkinfo, size_t flowIdx, size_t regId )
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
reg_params * rp = &(info->bkgObj->region_data->my_regions.rp);
PerFlowParamsRegion & ref = PerFlowParamReg.refAtReg(regId);
ref.setCopyDrift(rp->CopyDrift);
ref.setDarkness(rp->darkness[0]);
ref.setRatioDrift(rp->RatioDrift);
ref.setSigma(*(rp->AccessSigma()));
ref.setFineStart(info->bkgObj->region_data->my_regions.cache_step.i_start_fine_step[flowIdx]);
ref.setCoarseStart(info->bkgObj->region_data->my_regions.cache_step.i_start_coarse_step[flowIdx]);
ref.setTMidNuc(rp->AccessTMidNuc()[0]);
ref.setTMidNucShift(rp->nuc_shape.t_mid_nuc_shift_per_flow[flowIdx]);
ref.setTshift(rp->tshift);
#if TRANSLATE_DEBUG_OUTPUT
cout << "DEBUG regId " << regId << " ";
ref.print();
#endif
}
void UpdatePerFlowRegionParams_RegionToCube(LayoutCubeWithRegions<PerFlowParamsRegion> & PerFlowParamReg, reg_params * rp, size_t flowIdx, size_t regId )
{
//BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
// reg_params * rp = &(info->bkgObj->region_data->my_regions.rp);
PerFlowParamsRegion & ref = PerFlowParamReg.refAtReg(regId);
ref.setTMidNucShift(rp->nuc_shape.t_mid_nuc_shift_per_flow[flowIdx]);
}
void TranslatorsFlowByFlow::TranslateConstantRegionParams_RegionToCube(LayoutCubeWithRegions<ConstantParamsRegion> & ConstParamReg, void * bkinfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
reg_params * rp = &(info->bkgObj->region_data->my_regions.rp);
ConstantParamsRegion & ref = ConstParamReg.refAtReg(regId);
ref.setMoleculesToMicromolarConversion(rp->molecules_to_micromolar_conversion);
ref.setSens(rp->sens);
ref.setTauE(rp->tauE);
ref.setTauRM(rp->tau_R_m);
ref.setTauRO(rp->tau_R_o);
ref.setTimeStart(info->bkgObj->region_data->time_c.time_start);
ref.setT0Frame(info->bkgObj->region_data->t0_frame);
ref.setMinTmidNuc(info->bkgObj->region_data->my_regions.rp_low.AccessTMidNuc()[0]);
ref.setMaxTmidNuc(info->bkgObj->region_data->my_regions.rp_high.AccessTMidNuc()[0]);
ref.setMinCopyDrift(*(info->bkgObj->region_data->my_regions.rp_low.AccessCopyDrift()));
ref.setMaxCopyDrift((*info->bkgObj->region_data->my_regions.rp_high.AccessCopyDrift()));
ref.setMinRatioDrift(*(info->bkgObj->region_data->my_regions.rp_low.AccessRatioDrift()));
ref.setMaxRatioDrift(*(info->bkgObj->region_data->my_regions.rp_high.AccessRatioDrift()));
#if TRANSLATE_DEBUG_OUTPUT
cout << "DEBUG regId " << regId << " ";
ref.print();
#endif
}
void TranslatorsFlowByFlow::TranslatePerNucRegionParams_RegionToCube(LayoutCubeWithRegions<PerNucParamsRegion> & PerNucCube, void * bkinfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
reg_params * rp = &(info->bkgObj->region_data->my_regions.rp);
PerNucCube.setRWStrideZ(); //step through planes, each plane one nuc
PerNucCube.setRWPtrRegion(regId);
for(int i = 0; i < NUMNUC; i++ ){
PerNucParamsRegion & ref = PerNucCube.ref();
ref.setD(rp->AccessD()[i]);
ref.setKmax(rp->kmax[i]);
ref.setKrate(rp->krate[i]);
ref.setNucModifyRatio(rp->AccessNucModifyRatio()[i]);
ref.setTMidNucDelay(rp->nuc_shape.t_mid_nuc_delay[i]);
ref.setC(rp->nuc_shape.C[i]);
ref.setSigmaMult(rp->nuc_shape.sigma_mult[i]);
#if TRANSLATE_DEBUG_OUTPUT
cout << "DEBUG regId " << regId << " NucId " << i << " ";
ref.print();
#endif
}
}
void TranslatorsFlowByFlow::TranslatePerFlowRegionParams_CubeToRegion(LayoutCubeWithRegions<PerFlowParamsRegion> &perFlowRegParams, void *bkgInfo, size_t regId)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkgInfo;
WorkSet myJob(info);
reg_params *rp = myJob.getRegionParams();
PerFlowParamsRegion pfRegP = perFlowRegParams.getAtReg(regId);
*(rp->AccessTMidNuc()) = pfRegP.getTMidNuc();
*(rp->AccessRatioDrift()) = pfRegP.getRatioDrift();
*(rp->AccessCopyDrift()) = pfRegP.getCopyDrift();
*(rp->AccessTMidNucShiftPerFlow()) = pfRegP.getTMidNucShift();
}
void ConstanSymbolCopier::PopulateSymbolConstantImgageParams(ImgRegParams iP, ConstantFrameParams & CfP, void * bkinfoArray)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfoArray;
int maxFrames = 0;
for(size_t i=0; i < iP.getNumRegions(); i++)
{
int f = info[i].bkgObj->region_data->time_c.npts();
maxFrames = (maxFrames <f )?(f):(maxFrames);
}
RawImage * rpt = info->img->raw;
CfP.setRawFrames(rpt->frames);
CfP.setUncompFrames(rpt->uncompFrames);
if(CfP.getUncompFrames() > MAX_UNCOMPRESSED_FRAMES_GPU){
cout <<"---------------------------------------------------------------------------"<<endl
<<"CUDA WARNING: The number of uncompressed frames of "<< CfP.getUncompFrames() <<" for this block " << endl
<<" exceeds the GPU frame buffer limit for a maximum of " << MAX_UNCOMPRESSED_FRAMES_GPU << " frames." <<endl
<<" No more than "<< MAX_UNCOMPRESSED_FRAMES_GPU <<" uncompressed frames will used!!" <<endl
<<"---------------------------------------------------------------------------"<<endl;
CfP.setUncompFrames(MAX_UNCOMPRESSED_FRAMES_GPU);
}
CfP.setMaxCompFrames(maxFrames);
for(int i=0; i < rpt->uncompFrames; i++){
CfP.interpolatedFrames[i] = rpt->interpolatedFrames[i];
CfP.interpolatedMult[i] = rpt->interpolatedMult[i];
CfP.interpolatedDiv[i] = rpt->interpolatedDiv[i];
}
CfP.print();
copySymbolsToDevice(CfP);
}
void ConstanSymbolCopier::PopulateSymbolConstantGlobal( ConstantParamsGlobal & CpG, void * bkinfo)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
reg_params * rp = myJob.getRegionParams();
CpG.setAdjKmult(myJob.getkmultAdj());
CpG.setMinKmult(myJob.getkmultLowLimit());
CpG.setMaxKmult(myJob.getkmultHighLimit());
CpG.setMinAmpl(myJob.getAmpLowLimit());
CpG.setMaxTauB(rp->max_tauB);
CpG.setMinTauB(rp->min_tauB);
CpG.setScaleLimit(myJob.expTailFitBkgAdjLimit());
CpG.setTailDClowerBound(myJob.expTailFitBkgDcLowerLimit());
CpG.setMagicDivisorForTiming(rp->nuc_shape.magic_divisor_for_timing);
CpG.setNucFlowSpan(rp->nuc_shape.nuc_flow_span);
CpG.setValveOpen(rp->nuc_shape.valve_open);
CpG.setEmphWidth(myJob.getEmphasisData().emphasis_width);
CpG.setEmphAmpl(myJob.getEmphasisData().emphasis_ampl);
CpG.setEmphParams(myJob.getEmphasisData().emp);
CpG.setClonalFilterFirstFlow(info->polyclonal_filter_opts.mixed_first_flow);
CpG.setClonalFilterLastFlow(info->polyclonal_filter_opts.mixed_last_flow);
CpG.print();
copySymbolsToDevice(CpG);
}
void ConstanSymbolCopier::PopulateSymbolPerFlowGlobal(PerFlowParamsGlobal & pFpG, void * bkinfo)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
pFpG.setRealFnum(myJob.getAbsoluteFlowNum());
//pFpG.setFlowIdx(0); // ToDo remove when data only copied by flow
//pFpG.setNucId(myJob.getFlowIdxMap()[flowIdx]);
pFpG.setNucId(myJob.getNucIdForFlow(myJob.getAbsoluteFlowNum()));
pFpG.print();
copySymbolsToDevice(pFpG);
}
void ConstanSymbolCopier::PopulateSymbolConfigParams(ConfigParams & confP, void * bkinfo)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
reg_params * rp = myJob.getRegionParams();
confP.clear();
if(myJob.fitkmultAlways()) confP.setFitKmult();
if(rp->fit_taue) confP.setFitTauE();
if(myJob.performExpTailFitting()) confP.setPerformExpTailFitting();
if(myJob.performBkgAdjInExpTailFit()) confP.setPerformBkgAdjInExpTailFit();
if(rp->use_alternative_etbR_equation) confP.setUseAlternativeEtbRequation();
if(myJob.useDarkMatterPCA()) confP.setUseDarkMatterPCA();
if(myJob.useDynamicEmphasis()) confP.setUseDynamicEmphasis();
if(myJob.performCrossTalkCorrection()) confP.setPerformTraceLevelXTalk();
else if(myJob.performWellsLevelXTalkCorrection()) confP.setPerformWellsLevelXTalk(); //ToDo: Wells Level is default and will not be set if Trace level is already set
if(myJob.performPolyClonalFilter()) confP.setPerformPolyClonalFilter();
if(myJob.fitTmidNucShift()) confP.setFitTmidNucShift();
confP.print();
copySymbolsToDevice(confP);
}
/*void ConstanSymbolCopier::PopulateSymbolConstantRegParamBounds( ConstantRegParamBounds & CpB, void * bkinfo)
{
BkgModelWorkInfo* info = (BkgModelWorkInfo*)bkinfo;
WorkSet myJob(info);
reg_params * rpMin = myJob.getRegionParamMinBounds();
reg_params * rpMax = myJob.getRegionParamMaxBounds();
CpB.setMinTmidNuc(rpMin->AccessTMidNuc()[0]);
CpB.setMaxTmidNuc(rpMax->AccessTMidNuc()[0]);
CpB.setMinRatioDrift(rpMin->AccessRatioDrift()[0]);
CpB.setMinCopyDrift(rpMin->AccessCopyDrift()[0]);
CpB.setMaxCopyDrift(rpMax->AccessCopyDrift()[0]);
CpB.print();
copySymbolsToDevice(CpB);
}*/
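// Marks ~100 wells per region as samples by striding through the region with
// the large constant 104729, giving a scattered, deterministic sample pattern.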
void BuildGenericSampleMask(
bool * sampleMask, //global base pointer to mask Initialized with false
const ImgRegParams &imgP,
size_t regId)
{
for (int sampIdx = 0; sampIdx < 100; sampIdx ++){
size_t large_num = sampIdx*104729;
size_t sampRow = large_num / imgP.getRegH(regId);
sampRow = sampRow % imgP.getRegH(regId);
size_t sampCol = large_num % imgP.getRegW(regId);
// cout << "sample id " << sampIdx << ": " << imgP.getWellIdx(regId,sampCol,sampRow) << endl;
sampleMask[ imgP.getWellIdx(regId,sampCol,sampRow) ] = true;
// cout << "sample id " << sampIdx << " mask update done" << endl;
}
}
void BuildMaskFromBeadParams_RegionToCube( LayoutCubeWithRegions<unsigned short> & Mask,
size_t numLBeads,
BeadParams * bP,
size_t regId)
{
size_t x,y;
// ImgRegParams ImageParams = Mask.getParams();
for(size_t idx = 0; idx < numLBeads; idx++ ){
x = bP->x;
y = bP->y;
Mask.setRWPtrRegion(regId,x,y);
Mask.write(MaskLive);
bP++;
}
}
//////////////////////////////////////////////////////////////////////
//CUBE PER FLOW DUMPER CLASS
//This class dumps multiple flows, in arbitrary order, from the old bkinfo object into the new layout-cube design.
template <typename T>
CubePerFlowDump<T>::CubePerFlowDump(size_t planeW, size_t planeH, size_t regW, size_t regH, size_t planes, size_t numFlowsinBlock):
flowBlockBase(0),regionsDumped(0),regionsDumpedLastBlock(0),FlowBlockSize(numFlowsinBlock),filePathPrefix("FlowPlaneDump")
{ // change from plane to cube
ImageParams.init(planeW,planeH,regW,regH);
for(size_t i=0; i< FlowBlockSize; i++)
FlowCubes.push_back (new LayoutCubeWithRegions<T>(planeW, planeH, regW, regH,planes,HostMem));
};
template <typename T>
CubePerFlowDump<T>::CubePerFlowDump( ImgRegParams iP, size_t planes, size_t numFlowsinBlock):
flowBlockBase(0),regionsDumped(0),regionsDumpedLastBlock(0),FlowBlockSize(numFlowsinBlock),filePathPrefix("FlowPlaneDump")
{ // change from plane to cube
ImageParams = iP;
for(size_t i=0; i< FlowBlockSize; i++)
FlowCubes.push_back (new LayoutCubeWithRegions<T>(iP.getImgW(), iP.getImgH(), iP.getRegW(),iP.getRegH(),planes,HostMem));
};
template <typename T>
CubePerFlowDump<T>::~CubePerFlowDump(){
destroy();
}
template <typename T>
void CubePerFlowDump<T>::destroy(){
while(FlowCubes.size() > 0){
delete *FlowCubes.begin();
FlowCubes.erase(FlowCubes.begin());
}
}
template <typename T>
void CubePerFlowDump<T>::setFilePathPrefix(string filep)
{
filePathPrefix = filep;
}
template <typename T>
void CubePerFlowDump<T>::WriteOneFlowToFile(LayoutCubeWithRegions<T> * dumpCube, size_t dumpflow)
{
ostringstream filename;
filename << DUMP_PATH << "/" << filePathPrefix << dumpflow << ".dat";
ofstream myFile (filename.str().c_str(), ios::binary);
cout << filename.str() << ": writing flow cube for " << regionsDumped << " regions at flow " << dumpflow << endl;
dumpCube->dumpCubeToFile(myFile);
myFile.close();
}
template <typename T>
void CubePerFlowDump<T>::WriteAllFlowsToFile(){
for(size_t f = 0; f<FlowBlockSize; f++){
WriteOneFlowToFile( FlowCubes[f], flowBlockBase+f);
}
}
template <typename T>
void CubePerFlowDump<T>::ClearFlowCubes(){
for(size_t f = 0; f<FlowBlockSize; f++){
FlowCubes[f]->memSet(0);
}
}
template <typename T>
void CubePerFlowDump<T>::DumpFlowBlockRegion(size_t regId, T* data, size_t flowBlockBegin, size_t nPerFlow, size_t flowstride, size_t plane )
{
if(nPerFlow > flowstride) flowstride = nPerFlow;
if(flowBlockBegin != flowBlockBase){
if (regionsDumped != regionsDumpedLastBlock ){
regionsDumpedLastBlock = regionsDumped;
WriteAllFlowsToFile();
}
flowBlockBase = flowBlockBegin;
regionsDumped = 0;
}
//size_t regId = ImageParams.getRegId(regCol,regRow);
//ToDo get rid of constparam structure and extract only needed params
for(size_t f = 0; f<FlowBlockSize; f++){
FlowCubes[f]->setRWStrideX();
FlowCubes[f]->setRWPtrRegion(regId,0,0,plane);
for(size_t w=0; w < nPerFlow; w++)
FlowCubes[f]->write(data[w]);
data += flowstride;
}
regionsDumped++;
if(regionsDumped == ImageParams.getNumRegions() || regionsDumped == regionsDumpedLastBlock){
regionsDumpedLastBlock = regionsDumped; //set here to prevent writing same data again in next flow block
WriteAllFlowsToFile();
}
}
template <typename T>
void CubePerFlowDump<T>::DumpOneFlowRegion(size_t regId, LayoutCubeWithRegions<T> & input, size_t iRegId, size_t flowBlockBegin, size_t flowInBlockIdx, size_t startPlane, size_t numPlanes)
{
size_t f = flowInBlockIdx;
assert(FlowCubes[f]->getRegW(regId) == input.getRegW(iRegId) && FlowCubes[f]->getRegH(regId) == input.getRegH(iRegId)); //check for identical region size
assert( startPlane+numPlanes <= FlowCubes[f]->getDimZ()); //check for enough planes
assert( startPlane+numPlanes <= input.getDimZ()); //check for enough planes
if(flowBlockBegin != flowBlockBase){
if (regionsDumped != regionsDumpedLastBlock ){
regionsDumpedLastBlock = regionsDumped;
WriteAllFlowsToFile();
}
flowBlockBase = flowBlockBegin;
regionsDumped = 0;
}
FlowCubes[f]->copyReg(regId,input,iRegId,numPlanes,startPlane);
  if(f== FlowBlockSize-1) regionsDumped++; // hacky: requires that each region dumps all FlowBlockSize flows sequentially
if(regionsDumped > 0){
if(regionsDumped == ImageParams.getNumRegions() || regionsDumped == regionsDumpedLastBlock){
regionsDumpedLastBlock = regionsDumped; //set here to prevent writing same data again in next flow block
WriteAllFlowsToFile();
}
}
}
template <typename T>
void CubePerFlowDump<T>::DumpOneFlowBlock(LayoutCubeWithRegions<T> & input, size_t flowBlockStartFlow, size_t flowInBlockIdx)
{
flowBlockBase = flowBlockStartFlow;
FlowCubes[flowInBlockIdx]->copy(input);
regionsDumped = input.getParams().getNumRegions();
WriteOneFlowToFile(FlowCubes[flowInBlockIdx], flowBlockStartFlow + flowInBlockIdx);
}
template <typename T>
void CubePerFlowDump<T>::DumpFlowBlockRegion(size_t ax, size_t ay, T* data, size_t realflowIdx, size_t nPerFlow, size_t flowstride )
{
assert(ax < ImageParams.getImgW() && ay < ImageParams.getImgH());
size_t regId = ImageParams.getRegId(ax,ay);
DumpFlowBlockRegion(regId, data, realflowIdx,nPerFlow, flowstride );
}
template <typename T>
void CubePerFlowDump<T>::ReadInOneFlow(size_t realflowIdx)
{
ostringstream filename;
filename << DUMP_PATH << "/" << filePathPrefix << realflowIdx << ".dat";
ifstream myFile (filename.str().c_str(), ios::binary);
if(!myFile){
cerr << "file " << filename.str() << " could not be opened!" << endl;
exit (-1);
}
cout << "reading data at flow " << realflowIdx << " from file " << filename.str() << endl;
if(!FlowCubes[0]->readCubeFromFile(myFile))
{
cout << "Error reading flow " << realflowIdx << " from file " << filename.str() << " buffer dimensions missmatch!" << endl;
myFile.close();
exit(-1);
}
myFile.close();
}
template <typename T>
LayoutCubeWithRegions<T> & CubePerFlowDump<T>::getFlowCube(size_t realflowIdx)
{
if(realflowIdx != flowBlockBase){
ReadInOneFlow(realflowIdx);
}
flowBlockBase = realflowIdx;
return *(FlowCubes[0]);
}
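// A minimal usage sketch of CubePerFlowDump (illustrative only; `imgParams` and
// `amplCube` below are hypothetical, assumed to be an initialized ImgRegParams
// and a populated per-flow LayoutCubeWithRegions<float>):
//
//   CubePerFlowDump<float> dumper(imgParams, /*planes=*/1, /*numFlowsinBlock=*/20);
//   dumper.setFilePathPrefix("AmplDump");
//   dumper.DumpOneFlowBlock(amplCube, /*flowBlockStartFlow=*/0, /*flowInBlockIdx=*/0);
//   LayoutCubeWithRegions<float> &readBack = dumper.getFlowCube(0); // re-reads from file if needed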
///////////////////////////////////////
//Explicit declaration
/*
template class LayoutCube<short>;
template class LayoutCube<unsigned short>;
template class LayoutCube<int>;
template class LayoutCube<size_t>;
template class LayoutCube<float>;
template class LayoutCube<ConstantParamsRegion>;
template class LayoutCube<PerFlowParamsRegion>;
template class LayoutCube<PerNucParamsRegion>;
template class LayoutCube<SampleCoordPair>;
*/
template class CubePerFlowDump<float>;
template class CubePerFlowDump<short>;
template class CubePerFlowDump<reg_params>;
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/distance_op.h"
#include "caffe2/utils/conversions.h"
#include <cub/block/block_reduce.cuh>
namespace caffe2 {
namespace {
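// One thread block per row i: block-reduce the squared differences
// sum_j (X[i*D+j] - Y[i*D+j])^2 and write half of that sum as distance[i].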
template <typename T>
__global__ void SquaredL2DistanceKernel(
const int N, const int D, const T* X, const T* Y, T* distance) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float dist = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
T diff = X[i * D + j] - Y[i * D + j];
dist += diff * diff;
}
float total_dist = BlockReduce(temp_storage).Sum(dist);
__syncthreads();
if (threadIdx.x == 0) {
distance[i] = total_dist / 2.0;
}
}
}
} // namespace
template <>
bool SquaredL2DistanceOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto* distance = Output(0);
CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE_EQ(
X.dim32(i),
Y.dim32(i),
"Mismatch in dimensions",
X.dims(),
" / ",
Y.dims());
}
int N = X.ndim() > 0 ? X.dim32(0) : 1;
int D = X.size() / N;
distance->Resize(vector<TIndex>(size_t(1), N));
SquaredL2DistanceKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, X.data<float>(), Y.data<float>(), distance->mutable_data<float>());
return true;
}
namespace {
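// Scales each length-D row k of x by alpha[k]: y[i] = x[i] * alpha[i / D].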
template <typename T>
__global__ void
StripedScaleKernel(const int N, const int D, const T* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int k = i / D;
y[i] = x[i] * alpha[k];
}
}
}
template <>
bool SquaredL2DistanceGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dDistance = Input(2);
auto* dX = Output(0);
auto* dY = Output(1);
int N = X.ndim() > 0 ? X.dim32(0) : 1;
int D = N > 0 ? X.size() / N : 0;
CAFFE_ENFORCE(X.ndim() == Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE_EQ(
X.dim32(i),
Y.dim32(i),
"Mismatch on dimensions: ",
X.dims(),
" / ",
Y.dims());
}
CAFFE_ENFORCE_EQ(dDistance.ndim(), 1);
CAFFE_ENFORCE_EQ(dDistance.dim32(0), N);
dX->ResizeLike(X);
dY->ResizeLike(Y);
math::Sub<float, CUDAContext>(
X.size(),
X.data<float>(),
Y.data<float>(),
dX->mutable_data<float>(),
&context_);
StripedScaleKernel<float><<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
dDistance.data<float>(),
dX->data<float>(),
dX->mutable_data<float>());
// The gradient of the other side is basically the negative.
math::Scale<float, CUDAContext>(
X.size(), -1, dX->data<float>(), dY->mutable_data<float>(), &context_);
return true;
}
namespace {
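// One thread block per row i: block-reduce sum_j |X[i*D+j] - Y[i*D+j]|
// (accumulated in float) and write it as distance[i].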
template <typename T>
__global__ void L1DistanceKernel(
const int N,
const int D,
const T* X,
const T* Y,
T* distance) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float sum = 0.0f;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
sum +=
abs(convert::To<T, float>(X[i * D + j]) -
convert::To<T, float>(Y[i * D + j]));
}
float aggregate = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
if (threadIdx.x == 0) {
distance[i] = aggregate;
}
}
}
} // namespace
template <>
bool L1DistanceOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto* distance = Output(0);
CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i));
}
const int N = X.ndim() > 0 ? X.dim32(0) : 1;
const int D = N > 0 ? X.size() / N : 0;
distance->Resize(vector<TIndex>(size_t(1), N));
L1DistanceKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, X.data<float>(), Y.data<float>(), distance->mutable_data<float>());
return true;
}
namespace {
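// Subgradient of the per-row L1 distance: +/- dDistance[row] depending on the
// sign of X - Y, with a small dead zone (|X - Y| <= kEps) where both gradients
// are zero.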
template <typename T>
__global__ void L1DistanceGradientKernel(
const int N,
const int D,
const T* X,
const T* Y,
const T* dDistance,
T* dX,
T* dY) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
constexpr float kEps = 1e-12;
int k = i / D;
if (X[i] - Y[i] < -kEps) {
dX[i] = -dDistance[k];
dY[i] = dDistance[k];
} else if (X[i] - Y[i] > kEps) {
dX[i] = dDistance[k];
dY[i] = -dDistance[k];
} else {
dX[i] = 0;
dY[i] = 0;
}
}
}
} // namespace
template <>
bool L1DistanceGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dDistance = Input(2);
auto* dX = Output(0);
auto* dY = Output(1);
int N = X.ndim() > 0 ? X.dim32(0) : 1;
int D = N > 0 ? X.size() / N : 0;
CAFFE_ENFORCE(X.ndim() == Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE_EQ(
X.dim32(i),
Y.dim32(i),
"Mismatch on dimensions: ",
X.dims(),
" / ",
Y.dims());
}
CAFFE_ENFORCE_EQ(dDistance.ndim(), 1);
CAFFE_ENFORCE_EQ(dDistance.dim32(0), N);
dX->ResizeLike(X);
dY->ResizeLike(Y);
L1DistanceGradientKernel<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
X.data<float>(),
Y.data<float>(),
dDistance.data<float>(),
dX->mutable_data<float>(),
dY->mutable_data<float>());
return true;
}
namespace {
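// One thread block per row i: block-reduce the per-row dot product
// sum_j X[i*D+j] * Y[i*D+j] into result[i].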
template <typename T>
__global__ void
DotProductKernel(const int N, const int D, const T* X, const T* Y, T* result) {
for (int i = blockIdx.x; i < N; i += gridDim.x) {
T partialSum = 0;
int offset = i * D;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
partialSum += X[offset + j] * Y[offset + j];
}
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
T sum = BlockReduce(temp_storage).Sum(partialSum);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = sum;
}
}
}
// X.size() = N*D, Y.size() = N
template <typename T>
__global__ void
BatchedMul(const int N, const int D, const T* X, const T* Y, T* result) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
result[i] = X[i] * Y[i / D];
}
}
// scale, XY, XN and result all have size N
template <typename T>
__global__ void Scale2AxpyScale(
const int N,
const T* scale,
const T* XY,
const T* XN,
T* result) {
CUDA_1D_KERNEL_LOOP(i, N) {
result[i] = -scale[i] * XY[i] / (XN[i] * XN[i]);
}
}
// X.size() = N*D, alpha.size() = N, Y.size() = N*D
template <typename T>
__global__ void
BatchedAxpy(const int N, const int D, const T* alpha, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
Y[i] += X[i] * alpha[i / D];
}
}
} // namespace
template <>
bool CosineSimilarityOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
auto* result = Output(COS_OUT);
CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i));
}
const int N = X.ndim() > 0 ? X.dim32(0) : 1;
const int D = X.size_from_dim(1);
result->Resize(N);
float* result_data = result->mutable_data<float>();
const float* X_data = X.data<float>();
const float* Y_data = Y.data<float>();
// Auxiliary arrays, one allocation of memory
aux_.Resize(2 * N);
float* aux_data = aux_.mutable_data<float>();
float* x2 = aux_data;
float* y2 = aux_data + N;
float* scale = x2;
const float kEps = 1e-12f;
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, X_data, x2);
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, Y_data, Y_data, y2);
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, Y_data, result_data);
math::Maximum<float, CUDAContext>(N, kEps, x2, x2, &context_);
math::Maximum<float, CUDAContext>(N, kEps, y2, y2, &context_);
math::Mul(N, x2, y2, scale, &context_);
math::InvSqrt(N, scale, scale, &context_);
math::Mul(N, result_data, scale, result_data, &context_);
return true;
}
template <>
bool CosineSimilarityGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
auto& dCos = Input(DER_COS_IN);
auto* dX = Output(DER_X_OUT);
auto* dY = Output(DER_Y_OUT);
const int N = X.ndim() > 0 ? X.dim32(0) : 1;
const int D = X.size_from_dim(1);
CAFFE_ENFORCE(X.ndim() == Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));
}
CAFFE_ENFORCE(dCos.ndim() == 1);
CAFFE_ENFORCE(dCos.dim32(0) == N);
dX->ResizeLike(X);
dY->ResizeLike(Y);
const auto* X_data = X.data<float>();
const auto* Y_data = Y.data<float>();
const auto* dCos_data = dCos.data<float>();
auto* dX_data = dX->mutable_data<float>();
auto* dY_data = dY->mutable_data<float>();
// one memory allocation, a few arrays
aux_.Resize(6 * N);
float* aux_data = aux_.mutable_data<float>();
float* xn = aux_data;
float* yn = aux_data + N;
float* xy = aux_data + 2 * N;
float* xyn = aux_data + 3 * N;
float* scale = aux_data + 4 * N;
float* axpy_scale = aux_data + 5 * N;
float kEps = 1e-12f;
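  // The launches below assemble the cosine-similarity gradient. With
  // s = x.y, cos = s / (||x|| * ||y||) and upstream gradient dCos:
  //   dX = dCos * ( y / (||x||*||y||) - s * x / (||x||^3 * ||y||) )
  //      = scale * y + axpy_scale * x,
  // where scale = dCos / (||x||*||y||) and axpy_scale = -scale * s / ||x||^2,
  // and symmetrically for dY with ||y||^2 in the denominator.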
// ||x||
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, X_data, xn);
math::Maximum<float, CUDAContext>(N, kEps, xn, xn, &context_);
math::Sqrt<float, CUDAContext>(N, xn, xn, &context_);
// ||y||
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, Y_data, Y_data, yn);
math::Maximum<float, CUDAContext>(N, kEps, yn, yn, &context_);
math::Sqrt<float, CUDAContext>(N, yn, yn, &context_);
// ||x|| * || y ||
math::Mul<float, CUDAContext>(N, xn, yn, xyn, &context_);
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, Y_data, xy);
math::Div<float, CUDAContext>(N, dCos_data, xyn, scale, &context_);
// dX
BatchedMul<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, Y_data, scale, dX_data);
Scale2AxpyScale<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, scale, xy, xn, axpy_scale);
BatchedAxpy<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, axpy_scale, X_data, dX_data);
// dY
BatchedMul<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, X_data, scale, dY_data);
Scale2AxpyScale<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, scale, xy, yn, axpy_scale);
BatchedAxpy<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, axpy_scale, Y_data, dY_data);
return true;
}
template <>
bool DotProductOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
auto* result = Output(DOT_OUT);
CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE_EQ(X.dim32(i), Y.dim32(i));
}
int N, D;
if (X.size() > 0) {
N = X.ndim() > 0 ? X.dim32(0) : 1;
D = X.size() / N;
} else {
N = 0;
D = 0;
}
result->Resize(N);
DotProductKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, X.data<float>(), Y.data<float>(), result->mutable_data<float>());
return true;
}
namespace {
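// Gradient of the per-row dot product: dX[i] = Y[i] * dDot[row],
// dY[i] = X[i] * dDot[row].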
template <typename T>
__global__ void DotProductGradientKernel(
const int N,
const int D,
const T* X,
const T* Y,
const T* dDot,
T* dX,
T* dY) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
T scale = dDot[i / D];
dX[i] = Y[i] * scale;
dY[i] = X[i] * scale;
}
}
} // namespace
template <>
bool DotProductGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
auto& dDot = Input(DER_DOT_IN);
auto* dX = Output(DER_X_OUT);
auto* dY = Output(DER_Y_OUT);
int N, D;
if (X.size() > 0) {
N = X.ndim() > 0 ? X.dim32(0) : 1;
D = X.size() / N;
} else {
N = 0;
D = 0;
}
CAFFE_ENFORCE(X.ndim() == Y.ndim());
for (int i = 0; i < X.ndim(); ++i) {
CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));
}
CAFFE_ENFORCE(dDot.ndim() == 1);
CAFFE_ENFORCE(dDot.dim32(0) == N);
dX->ResizeLike(X);
dY->ResizeLike(Y);
DotProductGradientKernel<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
X.data<float>(),
Y.data<float>(),
dDot.data<float>(),
dX->mutable_data<float>(),
dY->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(SquaredL2Distance,
SquaredL2DistanceOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SquaredL2DistanceGradient,
SquaredL2DistanceGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(L1Distance, L1DistanceOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
L1DistanceGradient,
L1DistanceGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(DotProduct, DotProductOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
DotProductGradient,
DotProductGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
CosineSimilarity,
CosineSimilarityOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
CosineSimilarityGradient,
CosineSimilarityGradientOp<float, CUDAContext>);
} // namespace caffe2
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
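// Second pass of mean/stddev for single-channel images: given the precomputed
// mean, each block accumulates squared deviations over a grid-stride set of
// rows (4 pixels per thread), tree-reduces them in shared memory and atomically
// adds the block sum to stddev_values[0]. The last block to finish (tracked by
// the device counter block_count, presumably defined in utility.hpp) converts
// the accumulated sum into sqrtf(sum / (rows * cols)).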
template <typename Tsrc>
__global__
void unmaskedDevC1Kernel(const Tsrc* src, int rows, int cols, int src_stride,
uint blocks, float* mean_values,
float* stddev_values) {
__shared__ float partial_sums[BLOCK_SIZE];
int threadIdx_x = threadIdx.x;
int element_x = ((blockIdx.x << BLOCK_SHIFT) + threadIdx_x) << 2;
int element_y = blockIdx.y;
partial_sums[threadIdx_x] = 0;
Tsrc* input;
Tsrc value0, value1, value2, value3;
float mean = mean_values[0];
for (; element_y < rows; element_y += gridDim.y) {
if (element_x < cols) {
input = (Tsrc*)((uchar*)src + element_y * src_stride);
value0 = input[element_x];
value1 = input[element_x + 1];
value2 = input[element_x + 2];
value3 = input[element_x + 3];
if (element_x < cols - 3) {
partial_sums[threadIdx_x] += (value0 - mean) * (value0 - mean);
partial_sums[threadIdx_x] += (value1 - mean) * (value1 - mean);
partial_sums[threadIdx_x] += (value2 - mean) * (value2 - mean);
partial_sums[threadIdx_x] += (value3 - mean) * (value3 - mean);
}
else {
partial_sums[threadIdx_x] += (value0 - mean) * (value0 - mean);
if (element_x < cols - 1) {
partial_sums[threadIdx_x] += (value1 - mean) * (value1 - mean);
}
if (element_x < cols - 2) {
partial_sums[threadIdx_x] += (value2 - mean) * (value2 - mean);
}
}
}
}
__syncthreads();
#if BLOCK_SIZE == 512
if (threadIdx_x < 256) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 256];
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 256
if (threadIdx_x < 128) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 128];
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 128
if (threadIdx_x < 64) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 64];
}
__syncthreads();
#endif
if (threadIdx_x < 32) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 32];
}
__syncthreads();
if (threadIdx_x < 16) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 16];
}
__syncthreads();
if (threadIdx_x < 8) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 8];
}
__syncthreads();
if (threadIdx_x < 4) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 4];
}
__syncthreads();
if (threadIdx_x < 2) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 2];
}
__syncthreads();
if (threadIdx_x < 1) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 1];
}
__syncthreads();
if (threadIdx_x == 0) {
atomicAdd(stddev_values, partial_sums[0]);
uint local_count = atomicInc(&block_count, blocks);
bool is_last_block_done = (local_count == (blocks - 1));
if (is_last_block_done) {
int elements = rows * cols;
float weight = 1.f / elements;
float square = stddev_values[0] * weight;
stddev_values[0] = sqrtf(square);
block_count = 0;
}
}
}
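// Multi-channel variant of the deviation pass: the same reduction, but each
// thread accumulates a whole pixel as a small vector (Tsumn) via the
// assignVector/mulAdd/atomicAddVector helpers, and the last block finalizes
// one stddev per channel.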
template <typename Tsrc, typename Tsrcn, typename Tsumn>
__global__
void unmaskedDevCnKernel(const Tsrc* src, int rows, int cols, int channels,
int src_stride, uint blocks, float* mean_values,
float* stddev_values) {
__shared__ Tsumn partial_sums[BLOCK_SIZE];
int threadIdx_x = threadIdx.x;
int element_x = (blockIdx.x << BLOCK_SHIFT) + threadIdx_x;
int element_y = blockIdx.y;
setZeroVector(partial_sums[threadIdx_x]);
Tsrcn* input;
Tsrcn value0;
Tsumn mean, value1;
readVector(mean, mean_values);
for (; element_y < rows; element_y += gridDim.y) {
if (element_x < cols) {
input = (Tsrcn*)((uchar*)src + element_y * src_stride);
value0 = input[element_x];
assignVector(value1, value0);
value1 -= mean;
mulAdd(partial_sums[threadIdx_x], value1, value1);
}
}
__syncthreads();
#if BLOCK_SIZE == 512
if (threadIdx_x < 256) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 256];
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 256
if (threadIdx_x < 128) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 128];
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 128
if (threadIdx_x < 64) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 64];
}
__syncthreads();
#endif
if (threadIdx_x < 32) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 32];
}
__syncthreads();
if (threadIdx_x < 16) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 16];
}
__syncthreads();
if (threadIdx_x < 8) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 8];
}
__syncthreads();
if (threadIdx_x < 4) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 4];
}
__syncthreads();
if (threadIdx_x < 2) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 2];
}
__syncthreads();
if (threadIdx_x < 1) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 1];
}
__syncthreads();
if (threadIdx_x == 0) {
atomicAddVector(stddev_values, partial_sums[0]);
uint local_count = atomicInc(&block_count, blocks);
bool is_last_block_done = (local_count == (blocks - 1));
if (is_last_block_done) {
int elements = rows * cols;
float weight = 1.f / elements;
float square = stddev_values[0] * weight;
stddev_values[0] = sqrtf(square);
if (channels > 2) {
square = stddev_values[1] * weight;
stddev_values[1] = sqrtf(square);
square = stddev_values[2] * weight;
stddev_values[2] = sqrtf(square);
}
if (channels > 3) {
square = stddev_values[3] * weight;
stddev_values[3] = sqrtf(square);
}
block_count = 0;
}
}
}
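// Masked single-channel deviation pass: identical to the unmasked kernel,
// except that only pixels whose mask byte is non-zero contribute, the number
// of contributing pixels is counted in partial_counts/mask_count, and the
// final normalization divides by mask_count instead of rows * cols.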
template <typename Tsrc>
__global__
void maskedDevC1Kernel(const Tsrc* src, int rows, int cols, int src_stride,
const uchar* mask, int mask_stride, uint blocks,
float* mean_values, float* stddev_values) {
__shared__ float partial_sums[BLOCK_SIZE];
__shared__ uint partial_counts[BLOCK_SIZE];
int threadIdx_x = threadIdx.x;
int element_x = ((blockIdx.x << BLOCK_SHIFT) + threadIdx_x) << 2;
int element_y = blockIdx.y;
partial_sums[threadIdx_x] = 0;
partial_counts[threadIdx_x] = 0;
Tsrc* input;
uchar* mask_row;
Tsrc value0, value1, value2, value3;
uchar mvalue0, mvalue1, mvalue2, mvalue3;
float mean = mean_values[0];
for (; element_y < rows; element_y += gridDim.y) {
if (element_x < cols) {
input = (Tsrc*)((uchar*)src + element_y * src_stride);
mask_row = (uchar*)((uchar*)mask + element_y * mask_stride);
value0 = input[element_x];
value1 = input[element_x + 1];
value2 = input[element_x + 2];
value3 = input[element_x + 3];
mvalue0 = mask_row[element_x];
mvalue1 = mask_row[element_x + 1];
mvalue2 = mask_row[element_x + 2];
mvalue3 = mask_row[element_x + 3];
if (mvalue0 > 0) {
partial_sums[threadIdx_x] += (value0 - mean) * (value0 - mean);
partial_counts[threadIdx_x] += 1;
}
if (mvalue1 > 0 && element_x < cols - 1) {
partial_sums[threadIdx_x] += (value1 - mean) * (value1 - mean);
partial_counts[threadIdx_x] += 1;
}
if (mvalue2 > 0 && element_x < cols - 2) {
partial_sums[threadIdx_x] += (value2 - mean) * (value2 - mean);
partial_counts[threadIdx_x] += 1;
}
if (mvalue3 > 0 && element_x < cols - 3) {
partial_sums[threadIdx_x] += (value3 - mean) * (value3 - mean);
partial_counts[threadIdx_x] += 1;
}
}
}
__syncthreads();
#if BLOCK_SIZE == 512
if (threadIdx_x < 256) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 256];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 256];
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 256
if (threadIdx_x < 128) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 128];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 128];
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 128
if (threadIdx_x < 64) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 64];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 64];
}
__syncthreads();
#endif
if (threadIdx_x < 32) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 32];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 32];
}
__syncthreads();
if (threadIdx_x < 16) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 16];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 16];
}
__syncthreads();
if (threadIdx_x < 8) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 8];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 8];
}
__syncthreads();
if (threadIdx_x < 4) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 4];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 4];
}
__syncthreads();
if (threadIdx_x < 2) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 2];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 2];
}
__syncthreads();
if (threadIdx_x < 1) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 1];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 1];
}
__syncthreads();
if (threadIdx_x == 0) {
atomicAdd(stddev_values, partial_sums[0]);
atomicAdd(&mask_count, partial_counts[0]);
uint local_count = atomicInc(&block_count, blocks);
bool is_last_block_done = (local_count == (blocks - 1));
if (is_last_block_done) {
float weight = 1.f / mask_count;
float square = stddev_values[0] * weight;
stddev_values[0] = sqrtf(square);
block_count = 0;
mask_count = 0;
}
}
}
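// Masked multi-channel deviation pass: vector-typed accumulation as in
// unmaskedDevCnKernel, restricted to pixels selected by the mask and
// normalized by mask_count.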
template <typename Tsrc, typename Tsrcn, typename Tsumn>
__global__
void maskedDevCnKernel(const Tsrc* src, int rows, int cols, int channels,
int src_stride, const uchar* mask, int mask_stride,
uint blocks, float* mean_values, float* stddev_values) {
__shared__ Tsumn partial_sums[BLOCK_SIZE];
__shared__ uint partial_counts[BLOCK_SIZE];
int threadIdx_x = threadIdx.x;
int element_x = (blockIdx.x << BLOCK_SHIFT) + threadIdx_x;
int element_y = blockIdx.y;
setZeroVector(partial_sums[threadIdx_x]);
partial_counts[threadIdx_x] = 0;
Tsrcn* input;
uchar* mask_row;
Tsrcn value0;
uchar mvalue;
Tsumn mean, value1;
readVector(mean, mean_values);
for (; element_y < rows; element_y += gridDim.y) {
if (element_x < cols) {
input = (Tsrcn*)((uchar*)src + element_y * src_stride);
mask_row = (uchar*)((uchar*)mask + element_y * mask_stride);
value0 = input[element_x];
mvalue = mask_row[element_x];
if (mvalue > 0) {
assignVector(value1, value0);
value1 -= mean;
mulAdd(partial_sums[threadIdx_x], value1, value1);
partial_counts[threadIdx_x] += 1;
}
}
}
__syncthreads();
#if BLOCK_SIZE == 512
if (threadIdx_x < 256) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 256];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 256];
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 256
if (threadIdx_x < 128) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 128];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 128];
}
__syncthreads();
#endif
#if BLOCK_SIZE >= 128
if (threadIdx_x < 64) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 64];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 64];
}
__syncthreads();
#endif
if (threadIdx_x < 32) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 32];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 32];
}
__syncthreads();
if (threadIdx_x < 16) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 16];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 16];
}
__syncthreads();
if (threadIdx_x < 8) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 8];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 8];
}
__syncthreads();
if (threadIdx_x < 4) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 4];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 4];
}
__syncthreads();
if (threadIdx_x < 2) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 2];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 2];
}
__syncthreads();
if (threadIdx_x < 1) {
partial_sums[threadIdx_x] += partial_sums[threadIdx_x + 1];
partial_counts[threadIdx_x] += partial_counts[threadIdx_x + 1];
}
__syncthreads();
if (threadIdx_x == 0) {
atomicAddVector(stddev_values, partial_sums[0]);
atomicAdd(&mask_count, partial_counts[0]);
uint local_count = atomicInc(&block_count, blocks);
bool is_last_block_done = (local_count == (blocks - 1));
if (is_last_block_done) {
float weight = 1.f / mask_count;
float square = stddev_values[0] * weight;
stddev_values[0] = sqrtf(square);
if (channels > 2) {
square = stddev_values[1] * weight;
stddev_values[1] = sqrtf(square);
square = stddev_values[2] * weight;
stddev_values[2] = sqrtf(square);
}
if (channels > 3) {
square = stddev_values[3] * weight;
stddev_values[3] = sqrtf(square);
}
block_count = 0;
mask_count = 0;
}
}
}
RetCode meanStdDev(const uchar* src, int rows, int cols, int channels,
int src_stride, const uchar* mask, int mask_stride,
float* mean_values, float* stddev_values,
cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(mean_values != nullptr);
PPL_ASSERT(stddev_values != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar));
if (mask != nullptr) {
PPL_ASSERT(mask_stride >= cols * (int)sizeof(uchar));
}
int columns, grid_y;
if (channels == 1) {
columns = divideUp(cols, 4, 2);
}
else {
columns = cols;
}
dim3 block, grid;
block.x = BLOCK_SIZE;
block.y = 1;
grid.x = divideUp(columns, BLOCK_SIZE, BLOCK_SHIFT);
grid_y = MAX_BLOCKS / grid.x;
grid.y = (grid_y < rows) ? grid_y : rows;
int blocks = grid.x * grid.y;
if (mask == nullptr) {
if (channels == 1) {
unmaskedMeanC1Kernel<uchar, uint><<<grid, block, 0, stream>>>(src, rows,
cols, src_stride, blocks, mean_values);
unmaskedDevC1Kernel<uchar><<<grid, block, 0, stream>>>(src, rows, cols,
src_stride, blocks, mean_values, stddev_values);
}
else if (channels == 3) {
unmaskedMeanCnKernel<uchar, uchar3, uint3><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, blocks, mean_values);
unmaskedDevCnKernel<uchar, uchar3, float3><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, blocks, mean_values,
stddev_values);
}
else { // channels == 4
unmaskedMeanCnKernel<uchar, uchar4, uint4><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, blocks, mean_values);
unmaskedDevCnKernel<uchar, uchar4, float4><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, blocks, mean_values,
stddev_values);
}
}
else {
if (channels == 1) {
maskedMeanC1Kernel<uchar, uint><<<grid, block, 0, stream>>>(src, rows,
cols, src_stride, mask, mask_stride, blocks, mean_values);
maskedDevC1Kernel<uchar><<<grid, block, 0, stream>>>(src, rows, cols,
src_stride, mask, mask_stride, blocks, mean_values, stddev_values);
}
else if (channels == 3) {
maskedMeanCnKernel<uchar, uchar3, uint3><<<grid, block, 0, stream>>>(src,
rows, cols, channels, src_stride, mask, mask_stride, blocks,
mean_values);
maskedDevCnKernel<uchar, uchar3, float3><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, mask, mask_stride, blocks,
mean_values, stddev_values);
}
else { // channels == 4
maskedMeanCnKernel<uchar, uchar4, uint4><<<grid, block, 0, stream>>>(src,
rows, cols, channels, src_stride, mask, mask_stride, blocks,
mean_values);
maskedDevCnKernel<uchar, uchar4, float4><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, mask, mask_stride, blocks,
mean_values, stddev_values);
}
}
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
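// A note on the launch configuration above: for single-channel images the
// column count is divideUp(cols, 4, 2), which suggests each thread consumes
// four pixels at a time, and grid.y is clamped so that grid.x * grid.y never
// exceeds MAX_BLOCKS.  The helper below is a hedged sketch of what such a
// shift-based round-up division typically looks like; the real divideUp() is
// defined elsewhere in this library and may differ.
inline int sketchDivideUp(int total, int grain, int shift) {
  // e.g. sketchDivideUp(1023, 4, 2) == 256: 1023 pixels need 256 4-wide loads
  return (total + grain - 1) >> shift;
}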
RetCode meanStdDev(const float* src, int rows, int cols, int channels,
int src_stride, const uchar* mask, int mask_stride,
float* mean_values, float* stddev_values,
cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(mean_values != nullptr);
PPL_ASSERT(stddev_values != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float));
if (mask != nullptr) {
PPL_ASSERT(mask_stride >= cols * (int)sizeof(uchar));
}
int columns, grid_y;
if (channels == 1) {
columns = divideUp(cols, 4, 2);
}
else {
columns = cols;
}
dim3 block, grid;
block.x = BLOCK_SIZE;
block.y = 1;
grid.x = divideUp(columns, BLOCK_SIZE, BLOCK_SHIFT);
grid_y = MAX_BLOCKS / grid.x;
grid.y = (grid_y < rows) ? grid_y : rows;
int blocks = grid.x * grid.y;
if (mask == nullptr) {
if (channels == 1) {
unmaskedMeanC1Kernel<float, float><<<grid, block, 0, stream>>>(src, rows,
cols, src_stride, blocks, mean_values);
unmaskedDevC1Kernel<float><<<grid, block, 0, stream>>>(src, rows, cols,
src_stride, blocks, mean_values, stddev_values);
}
else if (channels == 3) {
unmaskedMeanCnKernel<float, float3, float3><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, blocks, mean_values);
unmaskedDevCnKernel<float, float3, float3><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, blocks, mean_values,
stddev_values);
}
else { // channels == 4
unmaskedMeanCnKernel<float, float4, float4><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, blocks, mean_values);
unmaskedDevCnKernel<float, float4, float4><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, blocks, mean_values,
stddev_values);
}
}
else {
if (channels == 1) {
maskedMeanC1Kernel<float, float><<<grid, block, 0, stream>>>(src, rows,
cols, src_stride, mask, mask_stride, blocks, mean_values);
maskedDevC1Kernel<float><<<grid, block, 0, stream>>>(src, rows, cols,
src_stride, mask, mask_stride, blocks, mean_values, stddev_values);
}
else if (channels == 3) {
maskedMeanCnKernel<float, float3, float3><<<grid, block, 0, stream>>>(src,
rows, cols, channels, src_stride, mask, mask_stride, blocks,
mean_values);
maskedDevCnKernel<float, float3, float3><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, mask, mask_stride, blocks,
mean_values, stddev_values);
}
else { // channels == 4
maskedMeanCnKernel<float, float4, float4><<<grid, block, 0, stream>>>(src,
rows, cols, channels, src_stride, mask, mask_stride, blocks,
mean_values);
maskedDevCnKernel<float, float4, float4><<<grid, block, 0, stream>>>(
src, rows, cols, channels, src_stride, mask, mask_stride, blocks,
mean_values, stddev_values);
}
}
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode MeanStdDev<uchar, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
float* outMean,
float* outStdDev,
int maskWidthStride,
const uchar* mask) {
RetCode code = meanStdDev(inData, height, width, 1, inWidthStride, mask,
maskWidthStride, outMean, outStdDev, stream);
return code;
}
template <>
RetCode MeanStdDev<uchar, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
float* outMean,
float* outStdDev,
int maskWidthStride,
const uchar* mask) {
RetCode code = meanStdDev(inData, height, width, 3, inWidthStride, mask,
maskWidthStride, outMean, outStdDev, stream);
return code;
}
template <>
RetCode MeanStdDev<uchar, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
float* outMean,
float* outStdDev,
int maskWidthStride,
const uchar* mask) {
RetCode code = meanStdDev(inData, height, width, 4, inWidthStride, mask,
maskWidthStride, outMean, outStdDev, stream);
return code;
}
template <>
RetCode MeanStdDev<float, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
float* outMean,
float* outStdDev,
int maskWidthStride,
const uchar* mask) {
inWidthStride *= sizeof(float);
RetCode code = meanStdDev(inData, height, width, 1, inWidthStride, mask,
maskWidthStride, outMean, outStdDev, stream);
return code;
}
template <>
RetCode MeanStdDev<float, 3>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
float* outMean,
float* outStdDev,
int maskWidthStride,
const uchar* mask) {
inWidthStride *= sizeof(float);
RetCode code = meanStdDev(inData, height, width, 3, inWidthStride, mask,
maskWidthStride, outMean, outStdDev, stream);
return code;
}
template <>
RetCode MeanStdDev<float, 4>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
float* outMean,
float* outStdDev,
int maskWidthStride,
const uchar* mask) {
inWidthStride *= sizeof(float);
RetCode code = meanStdDev(inData, height, width, 4, inWidthStride, mask,
maskWidthStride, outMean, outStdDev, stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
#if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
// check before compiling anything else
#if ( NCOMP_PASSIVE != 0 )
# error : RTVD scheme does NOT support passive scalars !!
#endif
#include "CUFLU_Shared_FluUtility.cu"
#include "CUDA_ConstMemory.h"
#define to1D1(z,y,x) ( __umul24(z, FLU_NXT*FLU_NXT) + __umul24(y, FLU_NXT) + x )
#define to1D2(z,y,x) ( __umul24(z-FLU_GHOST_SIZE, PS2*PS2) + __umul24(y-FLU_GHOST_SIZE, PS2) + x-FLU_GHOST_SIZE )
static __device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS );
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_FluidSolver_RTVD
// Description : GPU fluid solver based on the relaxing TVD (RTVD) scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The three-dimensional evolution is achieved by using the dimensional-split method
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// g_Corner : Global memory array storing the physical corner coordinates of each patch group (USELESS CURRENTLY)
// g_Pot_USG : Global memory array storing the input potential for UNSPLIT_GRAVITY (NOT SUPPORTED in RTVD)
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
__global__ void CUFLU_FluidSolver_RTVD(
real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double g_Corner[][3],
const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
const real dt, const real _dh, const bool StoreFlux,
const bool XYZ, const real MinDens, const real MinPres, const real MinEint,
const EoS_t EoS )
{
__shared__ real s_cu [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_cw [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_flux [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_RLflux[FLU_BLOCK_SIZE_Y][5][FLU_NXT];
if ( XYZ )
{
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 0, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, true, 6, MinDens, MinPres, MinEint, &EoS );
}
else
{
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 6, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, true, 0, MinDens, MinPres, MinEint, &EoS );
}
} // FUNCTION : CUFLU_FluidSolver_RTVD
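// The dimensional-split strategy above applies the same one-dimensional
// update once per direction (x->y->z for the forward sweep, z->y->x for the
// backward sweep).  The host-side sketch below illustrates the idea with a
// first-order upwind advection on a small 3D array; the function name, array
// layout and boundary handling are illustrative only and are unrelated to the
// actual RTVD update performed by CUFLU_Advance().
#include <vector>

static void sketchDimensionalSplitAdvect( std::vector<float>& u,
                                          const int nx, const int ny, const int nz, const float nu )
{
   auto idx = [&](int i, int j, int k) { return (k*ny + j)*nx + i; };
   std::vector<float> old;

   old = u;  // x sweep: u[i] -= nu*( u[i] - u[i-1] )
   for (int k=0; k<nz; k++)
   for (int j=0; j<ny; j++)
   for (int i=1; i<nx; i++)
      u[ idx(i,j,k) ] -= nu*( old[ idx(i,j,k) ] - old[ idx(i-1,j,k) ] );

   old = u;  // y sweep
   for (int k=0; k<nz; k++)
   for (int j=1; j<ny; j++)
   for (int i=0; i<nx; i++)
      u[ idx(i,j,k) ] -= nu*( old[ idx(i,j,k) ] - old[ idx(i,j-1,k) ] );

   old = u;  // z sweep
   for (int k=1; k<nz; k++)
   for (int j=0; j<ny; j++)
   for (int i=0; i<nx; i++)
      u[ idx(i,j,k) ] -= nu*( old[ idx(i,j,k) ] - old[ idx(i,j,k-1) ] );

// inflow boundary cells (i=0, j=0, k=0) are left untouched for brevity
} // FUNCTION : sketchDimensionalSplitAdvect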
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_Advance
// Description : GPU device function, which performs a one-dimensional sweep based on the TVD scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The direction of the one dimensional sweep is determined by the input parameter "XYZ"
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// j_gap : Number of useless grids in each side in the j direction (j may not be equal to y)
// k_gap : Number of useless grids in each side in the k direction (k may not be equal to z)
// s_cu : Shared memory array storing the normal flux
// s_cw : Shared memory array storing the auxiliary flux
// s_flux : Shared memory array storing the final flux used to update the fluid variables
// s_RLflux : Shared memory array storing the left/right-moving flux
// XYZ : 0 : Update the solution in the x direction
// 3 : Update the solution in the y direction
// 6 : Update the solution in the z direction
// --> This parameter is also used to determine the place to store the output fluxes
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
__device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS )
{
const uint bx = blockIdx.x;
const uint tx = threadIdx.x;
const uint ty = threadIdx.y;
const uint dj = blockDim.y;
const uint size_j = FLU_NXT - (j_gap<<1);
const uint size_k = FLU_NXT - (k_gap<<1);
const uint NColumn = __umul24( size_j, size_k );
const uint i = tx; // (i,j) the element in shared memory under evaluation
const uint ip = i+1;
const uint im = i-1;
uint j = j_gap + ty%size_j;
uint k = k_gap + ty/size_j;
uint Column0 = 0; // the total number of columns that have been updated
const uint j_end = FLU_NXT - j_gap;
const uint k_end = FLU_NXT - k_gap;
const real dt_half = (real)0.5*dt;
const real *Passive = NULL; // RTVD does not support passive scalars
bool RuleOut = false;
const bool CheckMinPres_Yes = true;
real _rho, vx, p, c, Temp, Fluid[5], Fluid_half[5];
int ID1, ID2, ID3, Comp[5], delta_k;
// set the order of component for update in different directions
switch ( XYZ )
{
case 0: Comp[0] = 0; Comp[1] = 1; Comp[2] = 2; Comp[3] = 3; Comp[4] = 4; break;
case 3: Comp[0] = 0; Comp[1] = 2; Comp[2] = 1; Comp[3] = 3; Comp[4] = 4; break;
case 6: Comp[0] = 0; Comp[1] = 3; Comp[2] = 2; Comp[3] = 1; Comp[4] = 4; break;
}
// start the TVD scheme
do
{
// determine the array indices for updating in different directions
switch ( XYZ )
{
case 0: ID1 = to1D1( k, j, i ); break;
case 3: ID1 = to1D1( k, i, j ); break;
case 6: ID1 = to1D1( i, k, j ); break;
}
// load data into per-thread registers
for (int v=0; v<5; v++) Fluid[v] = g_Fluid_In[bx][ Comp[v] ][ID1];
// a. Evaluate the half-step values of fluid variables
//-----------------------------------------------------------------------------
// (a1). set variables defined in the center of cell
_rho = (real)1.0 / Fluid[0];
vx = _rho * Fluid[1];
p = Hydro_Con2Pres( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( Hydro_CheckNegative(p) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
p, __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(Fluid[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Fluid[0], __FILE__, __LINE__, __FUNCTION__ );
# endif
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid[1];
s_cw[ty][1][i] = Fluid[1]*vx + p;
s_cw[ty][2][i] = Fluid[2]*vx;
s_cw[ty][3][i] = Fluid[3]*vx;
s_cw[ty][4][i] = ( Fluid[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid[0];
s_cu[ty][1][i] = c*Fluid[1];
s_cu[ty][2][i] = c*Fluid[2];
s_cu[ty][3][i] = c*Fluid[3];
s_cu[ty][4][i] = c*Fluid[4];
__syncthreads();
// (a2). set flux defined in the right-hand surface of cell by the upwind scheme
if ( i < FLU_NXT-1 )
{
for (int v=0; v<5; v++)
s_flux[ty][v][i] = (real)0.5*( ( s_cu[ty][v][i ]+s_cw[ty][v][i ] ) -
( s_cu[ty][v][ip]-s_cw[ty][v][ip] ) );
}
__syncthreads();
// (a3). evaluate the intermediate values (u_half)
// if ( i > 0 )
if ( i > 0 && i < FLU_NXT-1 )
{
for (int v=0; v<5; v++) Fluid_half[v] = Fluid[v] - _dh*dt_half*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid_half[0] = FMAX( Fluid_half[0], MinDens );
Fluid_half[4] = Hydro_CheckMinEintInEngy( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4],
MinEint, NULL_REAL );
}
// Evaluate the full-step values of fluid variables
//-----------------------------------------------------------------------------
// (b1). reset variables defined in the center of cell at the intermediate state
if ( i > 0 && i < FLU_NXT-1 )
{
_rho = (real)1.0 / Fluid_half[0];
vx = _rho * Fluid_half[1];
p = Hydro_Con2Pres( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( Hydro_CheckNegative(p) )
printf( "ERROR : negative pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
p, __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(Fluid_half[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Fluid_half[0], __FILE__, __LINE__, __FUNCTION__ );
# endif
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid_half[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid_half[1];
s_cw[ty][1][i] = Fluid_half[1]*vx + p;
s_cw[ty][2][i] = Fluid_half[2]*vx;
s_cw[ty][3][i] = Fluid_half[3]*vx;
s_cw[ty][4][i] = ( Fluid_half[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid_half[0];
s_cu[ty][1][i] = c*Fluid_half[1];
s_cu[ty][2][i] = c*Fluid_half[2];
s_cu[ty][3][i] = c*Fluid_half[3];
s_cu[ty][4][i] = c*Fluid_half[4];
} // if ( i > 0 && i < FLU_NXT-1 )
// (b2). set the right-moving flux defined in the right-hand surface by the TVD scheme
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][i] + s_cw[ty][v][i] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] = s_RLflux[ty][v][i];
Temp = ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][im] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] += Temp / ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][im] );
}
}
__syncthreads();
// (b3). set the left-moving flux defined in the left-hand surface by the TVD scheme, get the total flux
// if ( i < FLU_NXT-2 )
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][ip] - s_cw[ty][v][ip] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] -= s_RLflux[ty][v][i];
Temp = ( s_RLflux[ty][v][im]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][ip] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] -= Temp / ( s_RLflux[ty][v][im]-s_RLflux[ty][v][ip] );
}
}
__syncthreads();
// (b4). advance fluid by one full time-step
// if ( i > 2 )
// if ( i > 2 && i < FLU_NXT-3 )
if ( i > 2 && i < FLU_NXT-3 && RuleOut == false )
{
for (int v=0; v<5; v++) Fluid[v] -= _dh*dt*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid[0] = FMAX( Fluid[0], MinDens );
Fluid[4] = Hydro_CheckMinEintInEngy( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4],
MinEint, NULL_REAL );
// check negative density and energy
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( Hydro_CheckNegative(Fluid[0]) )
printf( "ERROR : negative density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Fluid[0], __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(Fluid[4]) )
printf( "ERROR : negative energy (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Fluid[4], __FILE__, __LINE__, __FUNCTION__ );
# endif
// store the updated data back to the global memory
if ( FinalOut )
{
switch ( XYZ )
{
case 0: ID2 = to1D2( k, j, i ); break;
case 3: ID2 = to1D2( k, i, j ); break;
case 6: ID2 = to1D2( i, k, j ); break;
}
for (int v=0; v<5; v++) g_Fluid_Out[bx][ Comp[v] ][ID2] = Fluid[v];
}
else
for (int v=0; v<5; v++) g_Fluid_In [bx][ Comp[v] ][ID1] = Fluid[v];
}
// (b5). save the flux required by the flux-correction operation
if ( StoreFlux )
if ( k >= FLU_GHOST_SIZE && k < FLU_NXT-FLU_GHOST_SIZE )
if ( j >= FLU_GHOST_SIZE && j < FLU_NXT-FLU_GHOST_SIZE )
if ( i == 0 )
{
ID3 = __umul24( k-FLU_GHOST_SIZE, PS2 ) + (j-FLU_GHOST_SIZE);
for (int v=0; v<5; v++)
{
g_Flux[bx][XYZ+0][v][ID3] = s_flux[ty][ Comp[v] ][ 2];
g_Flux[bx][XYZ+1][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT/2-1];
g_Flux[bx][XYZ+2][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT - 4];
}
}
// reset the target array indices
j += dj;
if ( j >= j_end )
{
delta_k = ( j - j_end )/size_j + 1;
k += delta_k;
j -= __umul24( size_j, delta_k );
}
Column0 += dj;
// if the index k exceeds the maximum allowed value --> reset (j,k) to harmless values and wait for other
// threads (all threads must exit the while loop "at the same time", otherwise __syncthreads will fail !!)
if ( k >= k_end )
{
j = 0;
k = 0;
RuleOut = true;
}
__syncthreads();
}
while ( Column0 < NColumn );
} // FUNCTION : CUFLU_Advance
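// The flux correction in steps (b2)-(b3) above is a van-Leer-type limiter:
// the first-order flux is corrected only where the two neighbouring flux
// differences share the same sign, which keeps the update total-variation
// diminishing.  The scalar restatement below mirrors the expression used in
// the kernel for a 1D array of right-moving fluxes f (valid for 1 <= i <= N-2);
// it is a reading aid, not code invoked by this solver.
static inline real sketchTVDCorrectedFlux( const real *f, const int i )
{
   real corrected = f[i];
   const real prod = ( f[i+1] - f[i] )*( f[i] - f[i-1] );

// monotone region --> add the limited second-order correction
   if ( prod > (real)0.0 )    corrected += prod / ( f[i+1] - f[i-1] );

// otherwise (local extremum) keep the first-order flux
   return corrected;
} // FUNCTION : sketchTVDCorrectedFlux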
#endif // #if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
#include <nvidia/helper_cuda.h>
//#define BLK_SIZE 128
template<typename T, int32_t B>
__device__
inline T integralGetCompLoc(T* A, int32_t i, int32_t j, int32_t w, int32_t h)
{
const int32_t iNeg = max(i-B,0)*(w+1);
const int32_t jNeg = max(j-B,0);
const int32_t iPos = min(i+B,h)*(w+1);
const int32_t jPos = min(j+B,w);
// printf("%d %d %d %d %d\n",iNeg/w,jNeg,iPos/w,jPos,B);
const T a = A[iPos + jPos] // (min(i+w,A.rows-1),min(j+w,A.cols-1))
- A[iPos + jNeg] //.at<T>(min(i+w,A.rows-1),max(j-w,0))
- A[iNeg + jPos] // .at<T>(max(i-w,0),min(j+w,A.cols-1))
+ A[iNeg + jNeg];//.at<T>(max(i-w,0),max(j-w,0));
// if (print) printf("%d %d %d %d %d\n",iNeg/w,jNeg,iPos/w,jPos,a);
return a;
};
template<typename T>
__device__
inline T integralGet(T* A, int32_t lu, int32_t ld, int32_t rd, int32_t ru)
{
return A[rd] - A[ru] - A[ld] + A[lu];
};
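// Both helpers above assume a summed-area table with one extra row and column
// of zeros (hence the (w+1) stride), so any axis-aligned box sum costs four
// lookups: A[rd] - A[ru] - A[ld] + A[lu].  The host-side sketch below builds
// such a table from a w x h image and queries one box; the two functions are
// illustrative helpers and are not used anywhere in this file.
#include <vector>
#include <stdint.h>

static std::vector<int32_t> sketchBuildIntegral(const std::vector<int32_t>& img,
    int32_t w, int32_t h)
{
  std::vector<int32_t> A((w+1)*(h+1), 0);              // first row/column stay 0
  for(int32_t i=1; i<=h; ++i)
    for(int32_t j=1; j<=w; ++j)
      A[i*(w+1)+j] = img[(i-1)*w + (j-1)]
                   + A[(i-1)*(w+1)+j] + A[i*(w+1)+(j-1)] - A[(i-1)*(w+1)+(j-1)];
  return A;
}

// sum over rows [i0,i1) and columns [j0,j1), mirroring integralGet()
static int32_t sketchBoxSum(const std::vector<int32_t>& A, int32_t w,
    int32_t i0, int32_t j0, int32_t i1, int32_t j1)
{
  const int32_t lu = i0*(w+1) + j0;
  const int32_t ru = i0*(w+1) + j1;
  const int32_t ld = i1*(w+1) + j0;
  const int32_t rd = i1*(w+1) + j1;
  return A[rd] - A[ru] - A[ld] + A[lu];
}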
template<int32_t B>
__device__
inline bool integralCheck(uint8_t* haveData, int32_t i, int32_t j, int32_t
w, int32_t h, int32_t* lu, int32_t* ld, int32_t* rd, int32_t* ru)
{
int32_t iN = max(i-2-B,0);
int32_t iP = min(i+1+B,h);
int32_t jN = max(j-2-B,0);
int32_t jP = min(j+1+B,w);
*lu = iN*(w+1) + jN;
*ru = iN*(w+1) + jP;
*ld = iP*(w+1) + jN;
*rd = iP*(w+1) + jP;
// while(iN < i && (!haveData[*ru] || !haveData[*lu]))
// {
// iN ++;
// *lu +=w;
// *ru +=w;
// }
// while(i < iP && (!haveData[*rd] || !haveData[*ld]))
// {
// iP --;
// *ld -=w;
// *rd -=w;
// }
//
// while(jN < j && (!haveData[*lu] || !haveData[*ld]))
// {
// jN ++;
// *lu +=1;
// *ld +=1;
// }
//
// while(j < jP && (!haveData[*ru] || !haveData[*rd]))
// {
// jP --;
// *ru -=1;
// *rd -=1;
// }
//
// if(!haveData[*lu] || !haveData[*ld] || !haveData[*ru] || !haveData[*rd]
// || iN ==0 || jN == 0 || iP == h-1 || jP == w-1)
// {
//// printf("%d %d: %d %d %d %d\n",i,j,iN,jN,iP,jP);
////
//// TODO: testing
////
//// return false;
// }
//
//
// *lu += iN;
// *ru += iN;
// *ld += iP;
// *rd += iP;
// *lu = iN*(w+1) + jN;
// *ru = iN*(w+1) + jP;
// *ld = iP*(w+1) + jN;
// *rd = iP*(w+1) + jP;
// *lu += max(i-B,0);
// *ru += max(i-B,0) - jPos + jPosS;
// *ld += min(i+B,h) + iPos - iPosS;
// *rd += min(i+B,h) + iPos + jPos - iPosS - jPosS;
return true;
};
template<typename T, uint32_t BLK_SIZE, int32_t B>
__global__ void guidedFilter_ab_kernel(uint8_t* haveData, uint8_t*
haveDataAfter, T* a, T* b, int32_t* Ns, T* dIntSum, T* dIntSqSum, double
eps, uint32_t w, uint32_t h)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
if((idx<w)&&(idy<h))
// if(haveData[id])
{
int32_t lu,ld,rd,ru;
if(!integralCheck<B>(haveData,idy,idx,w,h,&lu,&ld,&rd,&ru)) // get the coordinates for integral img
{
haveDataAfter[id] = 0;
// b[id] = 0.0;
// a[id] = 1.0;
return;
}
const T n = integralGet<int32_t>(Ns,lu,ld,rd,ru);
if (n<B)//*(B-1))
{
b[id] = 0.0;
a[id] = 0.0;
haveDataAfter[id] = 0;
// else if(n==1.0)
// {
// const T dSum = integralGet<T>(dIntSum,lu,ld,rd,ru);
// b[id] = dSum/n;
// a[id] = 0.0;
// haveDataAfter[id] = 1;
}else{
// if(n < (2*B)*(2*B)) { haveDataAfter[id] = 0;
//// haveData[id] =0;
//// b[id] = 0.0;
//// a[id] = 1.0;
// return;
// }
//
// -------------- old ----------------
// const T muG = integralGet<T>(dIntSum,lu,ld,rd,ru);
// const T s = integralGet<T>(dIntSqSum,lu,ld,rd,ru);
// const T muSq = muG*muG;
// const T n1 = n-1.;
// const T a_ = ((n*s-muSq)*n1)/((s*n-muSq+eps*n1*n)*n);
// -------------- old ----------------
const T dSum = integralGet<T>(dIntSum,lu,ld,rd,ru);
const T S = integralGet<T>(dIntSqSum,lu,ld,rd,ru);
const T dSumSq = dSum*dSum;
const T n1 = n-1.;
const T a_ = (n*S-dSumSq)/(n*n/n1*S-dSumSq+n*n*eps);
b[id] = dSum/n*(1. - a_);
a[id] = a_;
haveDataAfter[id] = 1;
}
// }else{
// b[id] = 0.0;
// a[id] = 0.0;
}
}
void guidedFilter_ab_gpu(uint8_t* haveData, uint8_t* haveDataAfter,
double* a, double* b, int32_t* Ns, double* dSum, double* dSqSum,
double eps, uint32_t B, uint32_t w, uint32_t h)
{
dim3 threadsSq(16,16,1);
dim3 blocksSq(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
if(B==3){
guidedFilter_ab_kernel<double,16,3><<<blocksSq,threadsSq>>>(haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==5){
guidedFilter_ab_kernel<double,16,5><<<blocksSq,threadsSq>>>(haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==6){
guidedFilter_ab_kernel<double,16,6><<<blocksSq,threadsSq>>>(haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==7){
guidedFilter_ab_kernel<double,16,7><<<blocksSq,threadsSq>>>(haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==8){
guidedFilter_ab_kernel<double,16,8><<<blocksSq,threadsSq>>>(haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==9){
guidedFilter_ab_kernel<double,16,9><<<blocksSq,threadsSq>>>(haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==10){
guidedFilter_ab_kernel<double,16,10><<<blocksSq,threadsSq>>>(haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}
checkCudaErrors(cudaDeviceSynchronize());
};
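// For the depth-guided case (guide image == filtered image) the linear-model
// coefficients reduce to a = var/(var + eps) and b = mean*(1 - a).  The kernel
// above evaluates this from three windowed statistics obtained via the
// integral images: n (valid-pixel count), dSum (sum of depths) and S (sum of
// squared depths).  The scalar restatement below mirrors the exact expression
// used in the kernel (it assumes n > 1, which the kernel guarantees by
// rejecting windows with too few samples); it is a reading aid only.
static inline void sketchGuidedFilterAB(double n, double dSum, double S,
    double eps, double* a, double* b)
{
  const double n1 = n - 1.;
  // numerator = n*S - dSum^2 ~ n^2 * biased variance; the denominator uses the
  // (n-1)-normalized second moment plus the n^2-scaled regularizer eps,
  // exactly as written in the kernel
  const double a_ = (n*S - dSum*dSum) / (n*n/n1*S - dSum*dSum + n*n*eps);
  *a = a_;
  *b = dSum/n * (1. - a_);
}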
template<typename T, typename Tout, uint32_t BLK_SIZE, int32_t B>
__global__ void guidedFilter_out_kernel(uint8_t* haveData, T* depth, T* aInt, T* bInt, int32_t*
Ns, Tout* depthSmooth, uint32_t w, uint32_t h, double missingValue)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
if((idx<w)&&(idy<h))
if(haveData[id])
{
int32_t lu,ld,rd,ru;
if(!integralCheck<B>(haveData,idy,idx,w,h,&lu,&ld,&rd,&ru)) // get the coordinates for integral img
{
depthSmooth[id] = missingValue;
// haveData[id] =0;
return;
}
// integralCheck<B>(haveData,idy,idx,w,h,&lu,&ld,&rd,&ru);
const T n = integralGet<int32_t>(Ns,lu,ld,rd,ru);
if(n<B)//*B-1)
{
depthSmooth[id] = missingValue;
}else{
// if(n < (2*B)*(2*B)) { depthSmooth[id] = 0.0;
//// haveData[id] =0;
// return;
// }
const T muA = integralGet<T>(aInt,lu,ld,rd,ru);
const T muB = integralGet<T>(bInt,lu,ld,rd,ru);
depthSmooth[id] = (muA*depth[id] + muB)/n;
}
}else{
depthSmooth[id] = missingValue;
}
}
void guidedFilter_out_gpu(uint8_t* haveData, double* depth, double* aInt,
double* bInt, int32_t* Ns, float* depthSmooth, uint32_t B, uint32_t w,
uint32_t h)
{
dim3 threadsSq(16,16,1);
dim3 blocksSq(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
if(B==1){
guidedFilter_out_kernel<double,float,16,1><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}else if(B==2){
guidedFilter_out_kernel<double,float,16,2><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}else if(B==3){
guidedFilter_out_kernel<double,float,16,3><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}else if(B==4){
guidedFilter_out_kernel<double,float,16,4><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}else if(B==5){
guidedFilter_out_kernel<double,float,16,5><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}else if(B==6){
guidedFilter_out_kernel<double,float,16,6><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}else if(B==7){
guidedFilter_out_kernel<double,float,16,7><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}else if(B==8){
guidedFilter_out_kernel<double,float,16,8><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}else if(B==9){
guidedFilter_out_kernel<double,float,16,9><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}else if(B==10){
guidedFilter_out_kernel<double,float,16,10><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0f/0.0f);
}
checkCudaErrors(cudaDeviceSynchronize());
};
void guidedFilter_out_gpu(uint8_t* haveData, double* depth, double* aInt,
double* bInt, int32_t* Ns, double* depthSmooth, uint32_t B, uint32_t w,
uint32_t h)
{
dim3 threadsSq(16,16,1);
dim3 blocksSq(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
if(B==1){
guidedFilter_out_kernel<double,double,16,1><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}else if(B==2){
guidedFilter_out_kernel<double,double,16,2><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}else if(B==3){
guidedFilter_out_kernel<double,double,16,3><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}else if(B==4){
guidedFilter_out_kernel<double,double,16,4><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}else if(B==5){
guidedFilter_out_kernel<double,double,16,5><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}else if(B==6){
guidedFilter_out_kernel<double,double,16,6><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}else if(B==7){
guidedFilter_out_kernel<double,double,16,7><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}else if(B==8){
guidedFilter_out_kernel<double,double,16,8><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}else if(B==9){
guidedFilter_out_kernel<double,double,16,9><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}else if(B==10){
guidedFilter_out_kernel<double,double,16,10><<<blocksSq,threadsSq>>>(haveData,depth,aInt,bInt,Ns,depthSmooth,w,h,0.0/0.0);
}
checkCudaErrors(cudaDeviceSynchronize());
};
// ------------------------------- testing -------------------------------------
template<typename T, uint32_t BLK_SIZE, int32_t B>
__global__ void guidedFilter_ab_kernel(T* depth,uint8_t* haveData,
uint8_t* haveDataAfter, T* a, T* b, int32_t* Ns, T* dSum, T*
dSqSum, double eps, uint32_t w, uint32_t h)
{
const int idx = threadIdx.x + blockIdx.x*blockDim.x;
const int idy = threadIdx.y + blockIdx.y*blockDim.y;
const int id = idx+w*idy;
if((idx<w)&&(idy<h) && (haveData[id]))
{
int32_t lu,ld,rd,ru;
if(!integralCheck<B>(haveData,idy,idx,w,h,&lu,&ld,&rd,&ru)) // get the coordinates for integral img
{
haveDataAfter[id] = 0;
return;
}
const T n = integralGet<int32_t>(Ns,lu,ld,rd,ru);
// if(n < (2*B)*(2*B))
// {
// haveDataAfter[id] = 0;
// return;
// }
const T muG = integralGet<T>(dSum,lu,ld,rd,ru);
const T s = integralGet<T>(dSqSum,lu,ld,rd,ru);
const T muSq = muG*muG;
const T n1 = n-1.;
const T z = depth[id];
T epsT = eps;
if(eps <= 0.)
{
epsT = 0.0012+0.0019*(z-0.4)*(z-0.4)+0.0001/sqrt(z); // leaving out the noise by angle
epsT *=5.;
if(idx==300 && idy == 300)
printf("eps=%f",epsT);
}
const T a_ = ((n*s-muSq)*n1)/((s*n-muSq+epsT*n1*n)*n);
b[id] = muG*(1. - a_)/n;
a[id] = a_;
haveDataAfter[id] = 1;
}
}
void guidedFilter_ab_gpu(double* depth, uint8_t* haveData, uint8_t*
haveDataAfter, double* a, double* b, int32_t* Ns, double* dSum, double*
dSqSum, double eps, uint32_t B, uint32_t w, uint32_t h)
{
dim3 threadsSq(16,16,1);
dim3 blocksSq(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
if(B==3){
guidedFilter_ab_kernel<double,16,3><<<blocksSq,threadsSq>>>(depth,haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==5){
guidedFilter_ab_kernel<double,16,5><<<blocksSq,threadsSq>>>(depth,haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==6){
guidedFilter_ab_kernel<double,16,6><<<blocksSq,threadsSq>>>(depth,haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==7){
guidedFilter_ab_kernel<double,16,7><<<blocksSq,threadsSq>>>(depth,haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==8){
guidedFilter_ab_kernel<double,16,8><<<blocksSq,threadsSq>>>(depth,haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==9){
guidedFilter_ab_kernel<double,16,9><<<blocksSq,threadsSq>>>(depth,haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}else if(B==10){
guidedFilter_ab_kernel<double,16,10><<<blocksSq,threadsSq>>>(depth,haveData,haveDataAfter,a,b,Ns,dSum,dSqSum,eps,w,h);
}
checkCudaErrors(cudaDeviceSynchronize());
};
#include "lite/kernels/cuda/gru_compute.h"
#include <string>
#include <vector>
#include "lite/backends/cuda/cuda_utils.h"
#include "lite/backends/cuda/math/bias.h"
#include "lite/backends/cuda/math/gru_forward.h"
#include "lite/backends/cuda/math/sequence2batch.h"
#include "lite/backends/cuda/target_wrapper.h"
#include "lite/core/op_registry.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
template <typename T>
struct GRUMetaValue {
T* gate_weight;
T* state_weight;
T* gate_value;
T* reset_output_value;
T* output_value;
T* prev_out_value;
};
template <typename T>
struct GRUUnitFunctor {
static void compute(GRUMetaValue<T> value,
int frame_size,
int batch_size,
const lite::cuda::math::ActivationType& active_node,
const lite::cuda::math::ActivationType& active_gate,
bool origin_mode,
lite::cuda::math::Gemm<T, T>* blas,
CUDAContext* context) {
dim3 threads, grids;
if (batch_size == 1) {
if (lite::TargetWrapperCuda::GetComputeCapability() >= 70) {
if (frame_size < 16) {
constexpr int tiled_size = 8;
int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
threads = dim3(tiled_size, 1);
grids = dim3(frame_blocks, 1);
lite::cuda::math::FastCollectiveGruGate<
T,
tiled_size><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.prev_out_value,
value.gate_weight,
value.reset_output_value,
frame_size,
active_gate);
frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
grids = dim3(frame_blocks, 1);
lite::cuda::math::FastCollectiveGruOut<
T,
tiled_size><<<grids, threads, 0, context->exec_stream()>>>(
value.state_weight,
value.prev_out_value,
value.output_value,
value.gate_value,
value.reset_output_value,
frame_size,
active_node,
origin_mode);
} else {
constexpr int tiled_size = 16;
int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
threads = dim3(tiled_size, 1);
grids = dim3(frame_blocks, 1);
lite::cuda::math::FastCollectiveGruGate<
T,
tiled_size><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.prev_out_value,
value.gate_weight,
value.reset_output_value,
frame_size,
active_gate);
frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
grids = dim3(frame_blocks, 1);
lite::cuda::math::FastCollectiveGruOut<
T,
tiled_size><<<grids, threads, 0, context->exec_stream()>>>(
value.state_weight,
value.prev_out_value,
value.output_value,
value.gate_value,
value.reset_output_value,
frame_size,
active_node,
origin_mode);
}
return;
} else {
int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
int frame_blocks = (frame_size + 1024 - 1) / 1024;
threads = dim3(frame_per_block, 1);
grids = dim3(frame_blocks, 1);
}
} else {
threads = dim3(32, 32);
grids = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
}
if (value.prev_out_value) {
CHECK(blas->init(false,
false,
batch_size,
frame_size * 2,
frame_size,
frame_size,
frame_size * 2,
frame_size * 3,
context));
blas->run(1.0f,
1.0f,
value.prev_out_value,
value.gate_weight,
value.gate_value,
context);
}
lite::cuda::math::GruForwardResetOutput<
T><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.reset_output_value,
value.prev_out_value,
frame_size,
batch_size,
active_gate,
batch_size != 1);
CUDA_POST_KERNEL_CHECK;
if (value.prev_out_value) {
CHECK(blas->init(false,
false,
batch_size,
frame_size,
frame_size,
frame_size,
frame_size,
frame_size * 3,
context));
blas->run(1.0f,
1.0f,
value.reset_output_value,
value.state_weight,
value.gate_value + frame_size * 2,
context);
}
lite::cuda::math::GruForwardFinalOutput<
T><<<grids, threads, 0, context->exec_stream()>>>(value.gate_value,
value.prev_out_value,
value.output_value,
frame_size,
batch_size,
active_node,
origin_mode,
batch_size != 1);
CUDA_POST_KERNEL_CHECK;
}
};
template struct GRUUnitFunctor<float>;
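// GRUUnitFunctor above splits one GRU step into two GEMMs and two elementwise
// kernels: (1) gate_value[:, 0:2F] += prev_out * gate_weight, followed by the
// gate activation and reset_output = r (*) prev_out; (2) gate_value[:, 2F:3F]
// += reset_output * state_weight, followed by the candidate activation and
// the final blend with the update gate.  The scalar sketch below walks one
// frame of the standard GRU recurrence in that order; the precise gate
// ordering and the origin_mode blend are defined in
// lite/backends/cuda/math/gru_forward.h, so treat this as an illustration
// rather than a bit-exact reference.  expf/tanhf come from the math headers
// already pulled in above.
static inline float SketchSigmoid(float x) { return 1.f / (1.f + expf(-x)); }

static inline float SketchGruFrame(float gate_u,     // pre-activation update gate
                                   float gate_r,     // pre-activation reset gate
                                   float gate_c,     // pre-activation candidate
                                   float prev_out,   // previous hidden state
                                   float w_c,        // state_weight entry
                                   bool origin_mode) {
  const float u = SketchSigmoid(gate_u);
  const float r = SketchSigmoid(gate_r);
  const float reset_out = r * prev_out;              // input to the second GEMM
  const float c = tanhf(gate_c + w_c * reset_out);   // candidate hidden state
  return origin_mode ? u * prev_out + (1.f - u) * c  // blend with the update gate
                     : (1.f - u) * prev_out + u * c;
}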
template <>
struct GRUUnitFunctor<half> {
static void compute(GRUMetaValue<half> value,
int frame_size,
int batch_size,
const lite::cuda::math::ActivationType& active_node,
const lite::cuda::math::ActivationType& active_gate,
bool origin_mode,
lite::cuda::math::Gemm<half, half>* blas,
CUDAContext* context) {
dim3 threads, grids;
if (batch_size == 1) {
int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
int frame_blocks = (frame_size + 1024 - 1) / 1024;
threads = dim3(frame_per_block, 1);
grids = dim3(frame_blocks, 1);
} else {
threads = dim3(32, 32);
grids = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
}
if (value.prev_out_value) {
CHECK(blas->init(false,
false,
batch_size,
frame_size * 2,
frame_size,
frame_size,
frame_size * 2,
frame_size * 3,
context));
blas->run(1.0f,
1.0f,
value.prev_out_value,
value.gate_weight,
value.gate_value,
context);
}
lite::cuda::math::GruForwardResetOutput<
half><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.reset_output_value,
value.prev_out_value,
frame_size,
batch_size,
active_gate,
batch_size == 1);
CUDA_POST_KERNEL_CHECK;
if (value.prev_out_value) {
CHECK(blas->init(false,
false,
batch_size,
frame_size,
frame_size,
frame_size,
frame_size,
frame_size * 3,
context));
blas->run(1.0f,
1.0f,
value.reset_output_value,
value.state_weight,
value.gate_value + frame_size * 2,
context);
}
lite::cuda::math::GruForwardFinalOutput<
half><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.prev_out_value,
value.output_value,
frame_size,
batch_size,
active_node,
origin_mode,
batch_size == 1);
CUDA_POST_KERNEL_CHECK;
}
};
template <typename T, PrecisionType PType>
void GRUCompute<T, PType>::PrepareForRun() {
gemm_impl_.reset(new lite::cuda::math::Gemm<T, T>);
}
template <typename T, PrecisionType PType>
void GRUCompute<T, PType>::Run() {
auto& context = this->ctx_->template As<CUDAContext>();
auto stream = context.exec_stream();
auto& param = this->template Param<param_t>();
auto* input = param.input;
T* x_data =
const_cast<lite::Tensor*>(input)->template mutable_data<T>(TARGET(kCUDA));
lite::Tensor* h0{nullptr};
if (param.h0) {
h0 = const_cast<lite::Tensor*>(param.h0);
}
lite::Tensor* bias{nullptr};
if (param.bias) {
bias = const_cast<lite::Tensor*>(param.bias);
}
const lite::Tensor* weight = param.weight;
T* weight_data = const_cast<T*>(weight->template data<T>());
lite::Tensor* batch_gate = param.batch_gate;
lite::Tensor* batch_reset_hidden_prev = param.batch_reset_hidden_prev;
lite::Tensor* batch_hidden = param.batch_hidden;
lite::Tensor* hidden = param.hidden;
T* batch_reset_hidden_prev_data =
batch_reset_hidden_prev->template mutable_data<T>(TARGET(kCUDA));
T* out_data = hidden->template mutable_data<T>(TARGET(kCUDA));
T* batch_gate_data = batch_gate->template mutable_data<T>(TARGET(kCUDA));
T* batch_hidden_data = batch_hidden->template mutable_data<T>(TARGET(kCUDA));
bool is_reverse = param.is_reverse;
auto active_node = lite::cuda::math::GetActiveType(param.activation);
auto active_gate = lite::cuda::math::GetActiveType(param.gate_activation);
bool origin_mode = param.origin_mode;
auto hidden_dims = hidden->dims();
int frame_size = hidden_dims[1];
LoD offset_vec_vec = input->lod();
std::vector<int> offset(offset_vec_vec[offset_vec_vec.size() - 1].size());
for (size_t i = 0; i < offset_vec_vec[offset_vec_vec.size() - 1].size();
++i) {
offset[i] = static_cast<int>(offset_vec_vec[offset_vec_vec.size() - 1][i]);
}
bool need_process = seq_utils_.GetSortedMap(offset, stream);
int emit_length = seq_utils_.GetEmitOffsetVec().size() - 1;
auto emit_offset_vec = seq_utils_.GetEmitOffsetVec();
if (need_process) {
seq_utils_.Seq2SortedSeq(
input->template data<T>(), batch_gate_data, 3 * frame_size, stream);
x_data = batch_gate_data;
out_data = batch_hidden_data;
}
if (bias) {
// TODO(wilber): validate when bias is not nullptr
lite::cuda::math::RowwiseAdd<T> add_bias;
add_bias(x_data,
bias->template data<T>(),
x_data,
frame_size,
batch_gate->numel(),
stream);
}
GRUMetaValue<T> gru_value;
gru_value.gate_weight = weight_data;
gru_value.state_weight = weight_data + 2 * frame_size * frame_size;
if (h0) {
    // Since batch computing for GRU reorders the input sequences according to
    // their length, the initial cell state also needs to be reordered.
// TODO(wilber): validate when h0 is not nullptr
ordered_h0_.Resize(h0->dims());
lite::cuda::math::CopyMatrixRowsFunctor<T> row_shuffle;
row_shuffle(*h0, &ordered_h0_, batch_gate->lod()[2], true, stream);
gru_value.prev_out_value = ordered_h0_.mutable_data<T>(TARGET(kCUDA));
} else {
gru_value.prev_out_value = nullptr;
}
for (size_t n = 0; n < emit_length; ++n) {
int bstart = emit_offset_vec[n];
int bend = emit_offset_vec[n + 1];
int cur_batch_size = bend - bstart;
gru_value.output_value = out_data + bstart * frame_size;
gru_value.gate_value = x_data + bstart * frame_size * 3;
gru_value.reset_output_value =
batch_reset_hidden_prev_data + bstart * frame_size;
GRUUnitFunctor<T>::compute(gru_value,
frame_size,
cur_batch_size,
active_node,
active_gate,
origin_mode,
gemm_impl_.get(),
&context);
gru_value.prev_out_value = gru_value.output_value;
}
if (need_process) {
seq_utils_.SortedSeq2Seq(batch_hidden_data,
hidden->mutable_data<T>(TARGET(kCUDA)),
frame_size,
stream);
}
hidden->set_lod(input->lod());
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
using GRUFp32 =
paddle::lite::kernels::cuda::GRUCompute<float, PRECISION(kFloat)>;
using GRUFp16 = paddle::lite::kernels::cuda::GRUCompute<half, PRECISION(kFP16)>;
REGISTER_LITE_KERNEL(gru, kCUDA, kFloat, kNCHW, GRUFp32, def)
.BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("H0", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("Weight", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("Bias", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("BatchGate", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("BatchResetHiddenPrev", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("BatchHidden", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Hidden", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
REGISTER_LITE_KERNEL(gru, kCUDA, kFP16, kNCHW, GRUFp16, def)
.BindInput("Input",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("H0", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("Weight",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("Bias", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("BatchGate",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("BatchResetHiddenPrev",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("BatchHidden",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("Hidden",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.Finalize();
template<typename scalar_t>
__device__ __forceinline__ scalar_t logSumExp(scalar_t a, scalar_t b) {
// standard log-sum-exp trick is used here to provide better numerical stability
return (a >= b) ? a + std::log1p(exp(b-a)) : b + std::log1p(exp(a-b));
}
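// The identity behind logSumExp() is
//   log(exp(a) + exp(b)) = max(a, b) + log1p(exp(-|a - b|)),
// which never exponentiates a positive argument (so it cannot overflow) and
// uses log1p to keep precision when exp(-|a-b|) is tiny.  A host-side
// restatement, for reference only:
#include <cmath>

template <typename scalar_t>
inline scalar_t logSumExpHostSketch(scalar_t a, scalar_t b) {
    // e.g. logSumExpHostSketch(1000.0, 1000.0) == 1000.0 + log(2), whereas the
    // naive log(exp(1000) + exp(1000)) overflows to inf in double precision
    return (a >= b) ? a + std::log1p(std::exp(b - a))
                    : b + std::log1p(std::exp(a - b));
}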
// Vanilla transducer loss function (i.e. forward-backward algorithm)
// Detail of this loss function can be found in:
// [1] Sequence Transduction with Recurrent Neural Networks.
// Forward (alpha) and backward (beta) paths are launched together. Input is assumed to be converted
// into log scale by the preceding log_softmax layer.
// A diagonal-wavefront traversal, commonly used in dynamic programming, is leveraged here.
// alpha and beta are of acc_t type, as they are essentially accumulators.
// This loss function supports packed input where a tensor of shape [B, T, U, H] is packed into
// [B_packed, H].
// Don't-care region (t > audLen) or (u > txtLen) is removed.
// To support the packed input, the starting offsets for each batch need to be specified with
// batchOffset.
template <typename scalar_t, typename acc_t>
__global__ void transducer_loss_forward(
const scalar_t* x,
const int* label,
const int* audLen,
const int* txtLen,
const int64_t* batchOffset,
int64_t dictSize, // 64-bit indexing for data tensor
int64_t blankIdx,
int64_t maxFLen,
int64_t maxGLen,
bool packedInput,
acc_t* alpha,
acc_t* beta,
scalar_t* loss) {
const int batch = blockIdx.y;
const int tid = threadIdx.x;
const auto myFLen = audLen[batch];
// Note that start of the sentence is added as 1 here
const auto myGLen = txtLen[batch] + 1;
const auto myLabel = label + batch * (maxGLen-1);
const int64_t myBatchOffset = packedInput ? (batch == 0 ? 0 : batchOffset[batch-1])
: batch * maxFLen * maxGLen;
const int64_t myStrideT = packedInput ? myGLen : maxGLen;
const scalar_t* myX = x + myBatchOffset * dictSize;
int u = tid;
if (blockIdx.x == 0){
// alpha path
acc_t* myAlpha = alpha + batch*maxFLen*maxGLen;
if (u == 0)
myAlpha[0] = 0;
__syncthreads();
for (int64_t step = 1; step < myFLen+myGLen-1; ++step){
// Move along the diagonal wavefront to leverage available parallelism
for (u = tid; u < myGLen; u += blockDim.x){
int64_t t = step - u;
if (t >= 0 and t < myFLen and u >= 0 and u < myGLen){
// Eq(16) in [1]
if (u == 0){
// alpha(t, u) = alpha(t-1, u) * null(t-1, u)
myAlpha[t*maxGLen + u] = myAlpha[(t-1)*maxGLen]
+ myX[((t-1)*myStrideT) * dictSize + blankIdx];
}
else if (t == 0){
// alpha(t, u-1) = alpha(t, u-1) * y(t, u-1)
myAlpha[u] = myAlpha[u - 1] + myX[(u - 1) * dictSize + myLabel[u - 1]];
}
else{
// alpha(t, u) = alpha(t-1, u) * null(t-1, u) + alpha(t, u-1) * y(t, u-1)
acc_t current = myAlpha[(t-1)*maxGLen + u]
+ myX[((t-1)*myStrideT + u) * dictSize + blankIdx];
acc_t next = myAlpha[t*maxGLen + u - 1]
+ myX[(t*myStrideT + u - 1) * dictSize + myLabel[u - 1]];
myAlpha[t*maxGLen + u] = logSumExp(next, current);
}
}
}
__syncthreads();
}
}
else if (blockIdx.x == 1){
// beta path
acc_t* myBeta = beta + batch*maxFLen*maxGLen;
if (u == 0){
myBeta[(myFLen-1)*maxGLen + myGLen - 1] = myX[((myFLen-1)*myStrideT
+ myGLen - 1) * dictSize + blankIdx];
}
__syncthreads();
for (int64_t step = myFLen+myGLen - 3; step >= 0; --step){
for (u = tid; u < myGLen; u += blockDim.x){
int64_t t = step - u;
if (t >= 0 and t < myFLen and u >=0 and u < myGLen){
// Eq(18) in [1]
if (u == myGLen - 1){
// beta(t, u) = beta(t+1, u) * null(t, u)
myBeta[t*maxGLen + u] = myBeta[(t+1)*maxGLen + u]
+ myX[(t*myStrideT + u) * dictSize + blankIdx];
}
else if (t == myFLen - 1){
// beta(t, u) = beta(t, u+1) * y(t, u)
myBeta[t*maxGLen + u] = myBeta[t*maxGLen + u + 1]
+ myX[(t*myStrideT + u) * dictSize + myLabel[u]];
}
else{
// beta(t, u) = beta(t+1, u)*null(t, u) + beta(t, u+1)*y(t, u)
acc_t current = myBeta[(t+1)*maxGLen + u]
+ myX[(t*myStrideT + u) * dictSize + blankIdx];
acc_t next = myBeta[t*maxGLen + u + 1]
+ myX[(t*myStrideT + u) * dictSize + myLabel[u]];
myBeta[t*maxGLen + u] = logSumExp(next, current);
}
}
}
__syncthreads();
}
if (tid == 0)
loss[batch] = -myBeta[0];
}
}
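// A host-side reference for the alpha recursion computed above (Eq(16) in [1])
// for a single unpacked utterance with T frames, U = txtLen + 1 grid rows and
// log-probabilities laid out as x[(t*U + u)*dictSize + k].  It mirrors the
// kernel's index arithmetic and returns the same loss value -log Pr(y*|x);
// intended for reading and debugging, not for performance.
#include <vector>
#include <cmath>
#include <limits>

inline double transducerLossRefSketch(const std::vector<double>& x,
                                      const std::vector<int>& label,
                                      int T, int U, int dictSize, int blankIdx) {
    const double negInf = -std::numeric_limits<double>::infinity();
    std::vector<double> alpha(T * U, 0.0);
    for (int t = 0; t < T; ++t) {
        for (int u = 0; u < U; ++u) {
            if (t == 0 && u == 0) continue;                 // alpha(0,0) = 0
            // arrive by emitting blank from (t-1, u)
            double fromT = (t > 0)
                ? alpha[(t-1)*U + u] + x[((t-1)*U + u)*dictSize + blankIdx]
                : negInf;
            // arrive by emitting label[u-1] from (t, u-1)
            double fromU = (u > 0)
                ? alpha[t*U + u - 1] + x[(t*U + u - 1)*dictSize + label[u - 1]]
                : negInf;
            alpha[t*U + u] = (fromT >= fromU)
                ? fromT + std::log1p(std::exp(fromU - fromT))
                : fromU + std::log1p(std::exp(fromT - fromU));
        }
    }
    // the final blank emission from (T-1, U-1) closes the lattice
    return -(alpha[(T-1)*U + (U-1)] + x[((T-1)*U + U - 1)*dictSize + blankIdx]);
}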
// Transducer loss function (i.e. forward-backward algorithm) with batch loading optimization.
// Compared to the vanilla version, there are two optimizations:
// 1. load x in batch through loop unrolling to reduce the latency.
// 2. Use registers and shared memory to hold alpha and beta values passed from one step to the next.
// For simplicity, this kernel currently only supports U <= maxThread, which should be the common
// case. For cases where U > maxThread, the vanilla kernel is used as a fallback option.
// Detail of this loss function can be found in:
// [1] Sequence Transduction with Recurrent Neural Networks.
// Forward (alpha) and backward (beta) paths are launched together. Input is assumed to be converted
// into log scale by the preceding log_softmax layer.
// A diagonal-wavefront traversal, commonly used in dynamic programming, is leveraged here.
// alpha and beta are of acc_t type, as they are essentially accumulators.
// This loss function supports packed input where a tensor of shape [B, T, U, H] is packed into
// [B_packed, H].
// Don't-care region (t > audLen) or (u > txtLen) is removed.
// To support the packed input, the starting offsets for each batch need to be specified with
// batchOffset.
template <typename scalar_t, typename acc_t, int batchLdSize>
__global__ void transducer_loss_batch_load_forward(
const scalar_t* x,
const int* label,
const int* audLen,
const int* txtLen,
const int64_t* batchOffset,
int64_t dictSize,
int64_t blankIdx,
int64_t maxFLen,
int64_t maxGLen,
bool packedInput,
acc_t* alpha,
acc_t* beta,
scalar_t* loss) {
const int batch = blockIdx.y;
int u = threadIdx.x;
const auto myFLen = audLen[batch];
const auto myGLen = txtLen[batch] + 1;
const int64_t myBatchOffset = packedInput ? (batch == 0 ? 0 : batchOffset[batch-1])
: batch * maxFLen * maxGLen;
const int64_t myStrideT = packedInput ? myGLen : maxGLen;
const scalar_t* myX = x + myBatchOffset * dictSize;
scalar_t next[batchLdSize], current[batchLdSize];
extern __shared__ char smem8[];
auto smem = reinterpret_cast<acc_t*>(smem8);
if (blockIdx.x == 0){
// alpha path
acc_t* myAlpha = alpha + batch*maxFLen*maxGLen;
        // two SMEM regions, double-buffered for reads and writes to avoid data races
acc_t * const sharedAlpha[2] = {smem, smem+maxGLen};
sharedAlpha[0][u] = 0;
__syncthreads();
if (u == 0)
myAlpha[0] = 0;
auto myAlphaLabel = (u == 0) ? 0 : label[batch*(maxGLen-1) + u - 1];
// register used to pass value to the next step for the same thread
acc_t prvStepAlpha = 0;
for (int64_t step = 1; step < myFLen+myGLen-1+batchLdSize; step += batchLdSize){
// Move along the diagonal wavefront to leverage available parallelism
// Batch loading X through loop unrolling
#pragma unroll
for (int i = 0; i < batchLdSize; ++i){
if (step+i<myFLen+myGLen-1){
// index computing
int64_t t = step + i - u;
int64_t currentId = ((t-1)*myStrideT + u) * dictSize + blankIdx;
int64_t nextId = (t*myStrideT + u - 1) * dictSize + myAlphaLabel;
// main loading loop
if (t >= 0 and t < myFLen and u >= 0 and u < myGLen){
if (u == 0){
current[i] = myX[currentId];
}
else if (t == 0){
next[i] = myX[nextId];
}
else{
current[i] = myX[currentId];
next[i] = myX[nextId];
}
}
}
}
// main computing loop
for (int i = 0; i < batchLdSize; ++i){
// swap the pointer for double buffering
auto sharedAlphaRd = sharedAlpha[(step+i-1)%2];
auto sharedAlphaWr = sharedAlpha[(step+i)%2];
if (step+i<myFLen+myGLen-1){
int64_t t = step + i - u;
if (t >= 0 and t < myFLen and u >= 0 and u < myGLen){
// Eq(16) in [1]
if (u == 0)
prvStepAlpha = prvStepAlpha+current[i];
else if (t == 0)
prvStepAlpha = sharedAlphaRd[u-1]+next[i];
else
prvStepAlpha = logSumExp(prvStepAlpha+current[i], sharedAlphaRd[u-1]
+ next[i]);
sharedAlphaWr[u] = prvStepAlpha;
myAlpha[t*maxGLen + u] = prvStepAlpha;
}
}
__syncthreads();
}
}
}
else if (blockIdx.x == 1){
// beta path
acc_t* myBeta = beta + batch*maxFLen*maxGLen;
        // two SMEM regions, double-buffered for reads and writes to avoid data races
acc_t * const sharedBeta[2] = {smem, smem + maxGLen};
sharedBeta[0][u] = myX[((myFLen-1)*myStrideT + myGLen - 1) * dictSize + blankIdx];
__syncthreads();
auto myBetaLabel = (u == maxGLen - 1) ? 0 : label[batch*(maxGLen-1) + u];
// register used to pass value to the next step for the same thread
acc_t prvStepBeta = myX[((myFLen-1)*myStrideT + myGLen - 1) * dictSize + blankIdx];
if (u == 0)
myBeta[(myFLen-1)*maxGLen + myGLen - 1] = prvStepBeta;
for (int64_t step = 1; step < myFLen+myGLen-1; step += batchLdSize){
// Move along the diagonal wavefront to leverage available parallelism
// Batch loading X
#pragma unroll
for (int i = 0; i < batchLdSize; ++i){
if (step+i<myFLen+myGLen-1){
// index computing
int64_t t = myFLen+myGLen - (step + i) - 2 - u;
int64_t currentId = (t*myStrideT + u) * dictSize + blankIdx;
int64_t nextId = (t*myStrideT + u) * dictSize + myBetaLabel;
// main loading loop
if (t >= 0 and t < myFLen and u >= 0 and u < myGLen){
if (u == myGLen - 1){
current[i] = myX[currentId];
}
else if (t == myFLen - 1){
next[i] = myX[nextId];
}
else{
current[i] = myX[currentId];
next[i] = myX[nextId];
}
}
}
}
// main computing loop
for (int i = 0; i < batchLdSize; ++i){
// swap the pointer for double buffering
auto sharedBetaRd = sharedBeta[(step+i-1)%2];
auto sharedBetaWr = sharedBeta[(step+i)%2];
if (step+i<myFLen+myGLen-1){
int64_t t = myFLen+myGLen - (step + i) - 2 - u;
if (t >= 0 and t < myFLen and u >= 0 and u < myGLen){
// Eq(18) in [1]
if (u == myGLen - 1)
prvStepBeta = prvStepBeta+current[i];
else if (t == myFLen - 1)
prvStepBeta = sharedBetaRd[u+1]+next[i];
else
prvStepBeta = logSumExp(prvStepBeta+current[i], sharedBetaRd[u+1]
+ next[i]);
sharedBetaWr[u] = prvStepBeta;
myBeta[t*maxGLen + u] = prvStepBeta;
}
}
__syncthreads();
}
}
if (u == 0)
loss[batch] = -prvStepBeta;
}
}
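// The double buffering used above keeps two copies of the diagonal wavefront
// in shared memory and ping-pongs between them: step s reads the values
// written at step s-1 from buffer (s-1) % 2 and writes its own results into
// buffer s % 2, so one __syncthreads() per step is enough to avoid read/write
// races on the same buffer.  A minimal sketch of that indexing (valid for
// step >= 1, as in the kernels above); illustrative only:
inline void pingPongStepSketch(float* const buf[2], int step, int u,
                               float newValue, float* readBack) {
    float* rd = buf[(step - 1) % 2];   // produced by the previous step
    float* wr = buf[step % 2];         // produced by this step
    *readBack = rd[u];                 // safe: no thread writes rd this step
    wr[u]     = newValue;
}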
// Vanilla transducer loss backward operation.
// Detail of this loss function can be found in:
// [1] Sequence Transduction with Recurrent Neural Networks.
// For this backward kernel, bwd op for the preceding softmax is assumed to be handled elsewhere,
// hence only Eq(20) in [1] is implemented in this kernel.
// Each thread block works on [batch, t, :, :] of data. Each thread works on a specific u at a time
// Since only gradients for the correct token and null token need to be updated, gradients at other
// locations are initialized to 0.
// To support the packed input, the starting offsets for each batch need to be specified with
// batchOffset.
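// Concretely, the kernel below writes (per Eq(20) in [1]; x is assumed to hold log-probabilities
// since the softmax backward is handled elsewhere):
//   xGrad(t, u, label[u]) = -lossGrad * exp(alpha(t, u) + beta(t, u+1) + x(t, u, label[u]) - beta(0, 0))
//   xGrad(t, u, blank)    = -lossGrad * exp(alpha(t, u) + beta(t+1, u) + x(t, u, blank)    - beta(0, 0))
// where beta(0, 0) = ln Pr(y*|x); at the terminal cell (t == myFLen-1, u == myGLen-1) the blank
// term drops its beta factor.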
template <typename scalar_t, typename acc_t>
__global__ void transducer_loss_backward(
const scalar_t* x,
const scalar_t* lossGrad,
const int* audLen,
const int* txtLen,
const int* label,
const acc_t* alpha,
const acc_t* beta,
const int64_t* batchOffset,
int64_t dictSize,
int64_t blankIdx,
int64_t maxFLen,
int64_t maxGLen,
bool packedInput,
scalar_t* xGrad) {
const int tid = threadIdx.x;
const int t = blockIdx.x;
const int batch = blockIdx.y;
const int64_t myFLen = audLen[batch];
const int64_t myGLen = txtLen[batch] + 1;
const int64_t myBatchOffset = packedInput ? (batch == 0 ? 0 : batchOffset[batch-1])
: batch * maxFLen * maxGLen;
const int64_t myStrideT = packedInput ? myGLen : maxGLen;
auto myX = x + (myBatchOffset + t*myStrideT)*dictSize;
auto myAlpha = alpha + batch*maxFLen*maxGLen;
auto myBeta = beta + batch*maxFLen*maxGLen;
auto myXGrad = xGrad + (myBatchOffset + t*myStrideT)*dictSize;
auto myLabel = label + batch*(maxGLen-1);
int64_t u = tid;
while (t < myFLen and u < myGLen){
// Do the update
// loss = -ln(Pr(y*|x))
acc_t grad = std::log(lossGrad[batch]) + myAlpha[t*maxGLen + u] - myBeta[0];
if (u != myGLen - 1)
myXGrad[u*dictSize + myLabel[u]] = -std::exp(grad + myBeta[t*maxGLen + u + 1]
+ myX[u*dictSize + myLabel[u]]);
if (t == myFLen - 1 and u == myGLen - 1)
myXGrad[u*dictSize + blankIdx] = -std::exp(grad + myX[u*dictSize + blankIdx]);
else if (t != myFLen - 1)
myXGrad[u*dictSize + blankIdx] = -std::exp(grad + myBeta[(t+1)*maxGLen + u]
+ myX[u*dictSize + blankIdx]);
u += blockDim.x;
}
}
// Fused transducer loss backward operation.
// Detail of this loss function can be found in:
// [1] Sequence Transduction with Recurrent Neural Networks.
// The bwd op of the preceding softmax layer is fused in this kernel.
// Each thread block works on [batch, t, u, :] of data. Each thread works on a specific h at a time
// To support the packed input, the starting offsets for each batch need to be specified with
// batchOffset.
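// Concretely, every hidden unit h of a (batch, t, u) cell receives (matching the code below)
//   xGrad(t, u, h) = lossGrad * exp(alpha(t, u) + x(t, u, h) - beta(0, 0)) * (exp(beta(t, u)) - c(h))
// where c(h) = exp(beta(t, u+1)) when h == label[u] (and u < myGLen-1); for h == blank, c = 1 at
// the terminal cell, c = exp(beta(t+1, u)) when t < myFLen-1, and c = 0 otherwise; every other h
// gets c = 0, so a full row of dictSize gradients is produced per (batch, t, u) cell.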
template <typename scalar_t, typename acc_t>
__global__ void transducer_loss_fused_backward(
const scalar_t* x,
const scalar_t* lossGrad,
const int* audLen,
const int* txtLen,
const int* label,
const acc_t* alpha,
const acc_t* beta,
const int64_t* batchOffset,
int64_t dictSize,
int64_t blankIdx,
int64_t maxFLen,
int64_t maxGLen,
bool packedInput,
scalar_t* xGrad) {
const int tid = threadIdx.x;
const int u = blockIdx.x;
const int t = blockIdx.y;
const int batch = blockIdx.z;
const int64_t myFLen = audLen[batch];
const int64_t myGLen = txtLen[batch] + 1;
const int64_t myBatchOffset = packedInput ? (batch == 0 ? 0 : batchOffset[batch-1])
: batch * maxFLen * maxGLen;
const int64_t myStrideT = packedInput ? myGLen : maxGLen;
__shared__ acc_t commonFactor, myBetaTU;
auto myXGrad = xGrad + (myBatchOffset + t*myStrideT +u)*dictSize;
if (t < myFLen and u < myGLen){
auto myX = x + (myBatchOffset + t*myStrideT +u)*dictSize;
auto myAlpha = alpha + batch*maxFLen*maxGLen;
auto myBeta = beta + batch*maxFLen*maxGLen;
auto myLabel = label + batch*(maxGLen-1);
// load and store shared variables in SMEM
if (tid == 0){
commonFactor = std::log(lossGrad[batch]) + myAlpha[t*maxGLen + u] - myBeta[0];
myBetaTU = myBeta[t*maxGLen + u];
}
__syncthreads();
for (int64_t h = tid; h < dictSize; h += blockDim.x){
// Do the update
acc_t grad = commonFactor + myX[h]; // loss = -ln(Pr(y*|x))
acc_t myGrad = std::exp(grad + myBetaTU);
if (u != myGLen - 1 and h == myLabel[u]){
myGrad -= std::exp(grad + myBeta[t*maxGLen + u + 1]);
}
else if (h == blankIdx){
if (t == myFLen - 1 and u == myGLen - 1)
myGrad -= std::exp(grad);
else if (t != myFLen - 1)
myGrad -= std::exp(grad + myBeta[(t+1)*maxGLen + u]);
}
myXGrad[h] = myGrad;
}
}
else if (!packedInput){
        // In non-packed mode, make sure the gradients for the don't-care regions are zero.
for (int64_t h = tid; h < dictSize; h += blockDim.x){
myXGrad[h] = 0;
}
}
}
std::vector<torch::Tensor> transducer_loss_cuda_forward(
torch::Tensor x,
torch::Tensor label,
torch::Tensor audLen,
torch::Tensor txtLen,
torch::Tensor batchOffset,
int maxFLen,
int blankIdx,
int opt,
bool packedInput){
auto scalarType = x.scalar_type();
auto tensorOpt = x.options();
const int batchSize = label.size(0);
const int maxGLen = label.size(1) + 1;
const int dictSize = x.size(-1);
TORCH_CHECK(blankIdx >= 0 and blankIdx < dictSize,
"Expected blank index to be in the range of 0 to ",
dictSize-1,
", but got ",
blankIdx);
TORCH_CHECK(opt == -1 or opt == 0 or opt == 1,
"Got an invalid optimization level ",
opt);
// The data type of alpha and beta will be resolved at dispatch time,
// hence defined here and assigned later
torch::Tensor alpha;
torch::Tensor beta;
torch::Tensor loss = torch::empty({batchSize}, tensorOpt);
const auto deviceProperties = at::cuda::getCurrentDeviceProperties();
const auto maxThreadPerBlock = deviceProperties->maxThreadsPerBlock;
const auto maxSmemPerBlock = deviceProperties->sharedMemPerBlock;
const auto batchOffsetPtr = packedInput ? batchOffset.data_ptr<int64_t>() : nullptr;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(scalarType, "transducer_loss_cuda_forward", ([&] {
// resolve accumulation type
using acc_t = at::acc_type<scalar_t, true>;
auto accType = c10::CppTypeToScalarType<acc_t>::value;
auto accTensorOpt = tensorOpt.dtype(accType);
alpha = torch::empty({batchSize, maxFLen, maxGLen}, accTensorOpt);
beta = torch::empty({batchSize, maxFLen, maxGLen}, accTensorOpt);
// decide what kernel to launch based on the problem size
        // if the required SMEM size or number of threads exceeds the limit, fall back to the vanilla
// kernel.
const auto smemSize = 2*maxGLen*sizeof(acc_t);
const auto optFallBack = (maxGLen > maxThreadPerBlock or smemSize > maxSmemPerBlock) ? 0
: (opt == -1) ? 1 : opt;
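        // For example, with fp32 accumulation (sizeof(acc_t) == 4) and maxGLen == 1024 the
        // batch-load kernel needs 2 * 1024 * 4 = 8 KB of dynamic SMEM and 1024 threads, which
        // fits the typical 48 KB / 1024-thread per-block limits; a larger maxGLen or tighter
        // device limits trigger the fallback to the vanilla kernel.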
const int threads = std::min(maxThreadPerBlock, maxGLen);
const dim3 blocks(2, batchSize, 1);
if (optFallBack == 0)
transducer_loss_forward<<<blocks, threads, 0, stream>>>(
x.data_ptr<scalar_t>(),
label.data_ptr<int>(),
audLen.data_ptr<int>(),
txtLen.data_ptr<int>(),
batchOffsetPtr,
dictSize,
blankIdx,
maxFLen,
maxGLen,
packedInput,
alpha.data_ptr<acc_t>(),
beta.data_ptr<acc_t>(),
loss.data_ptr<scalar_t>());
else if (optFallBack == 1)
transducer_loss_batch_load_forward<scalar_t, acc_t, 4>
<<<blocks, threads, smemSize, stream>>>(
x.data_ptr<scalar_t>(),
label.data_ptr<int>(),
audLen.data_ptr<int>(),
txtLen.data_ptr<int>(),
batchOffsetPtr,
dictSize,
blankIdx,
maxFLen,
maxGLen,
packedInput,
alpha.data_ptr<acc_t>(),
beta.data_ptr<acc_t>(),
loss.data_ptr<scalar_t>());
}));
THCudaCheck(cudaGetLastError());
return {alpha, beta, loss};
}
torch::Tensor transducer_loss_cuda_backward(
torch::Tensor x,
torch::Tensor lossGrad,
torch::Tensor alpha,
torch::Tensor beta,
torch::Tensor audLen,
torch::Tensor txtLen,
torch::Tensor label,
torch::Tensor batchOffset,
int maxFLen,
int blankIdx,
int opt,
bool fuseSoftmaxBackward,
bool packedInput){
auto dtype = x.scalar_type();
torch::Tensor xGrad;
const int batchSize = label.size(0);
const int maxGLen = label.size(1) + 1;
const int dictSize = x.size(-1);
const auto deviceProperties = at::cuda::getCurrentDeviceProperties();
const int maxThreadPerBlock = deviceProperties->maxThreadsPerBlock;
const int warpSize = deviceProperties->warpSize;
const auto batchOffsetPtr = packedInput ? batchOffset.data_ptr<int64_t>() : nullptr;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (fuseSoftmaxBackward){
        // Allocate uninitialized tensors for performance; zeros therefore need to be written to
        // the don't-care regions inside the kernel.
xGrad = torch::empty_like(x);
// Would like each thread to work on 4 hidden units
const int workPerThread = 4;
// Don't want to have more than 128 threads per thread block
const int maxThreadPerElmt = std::min(128, maxThreadPerBlock);
const int threads = std::min(maxThreadPerElmt, std::max(warpSize,
(dictSize+workPerThread-1)/workPerThread));
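        // e.g. dictSize = 32000 gives ceil(32000/4) = 8000, clamped to 128 threads; a tiny
        // dictSize = 100 gives max(warpSize, 25) = 32 threads (assuming warpSize == 32).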
const dim3 blocks(maxGLen, maxFLen, batchSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(dtype, "transducer_loss_cuda_backward", ([&] {
using acc_t = at::acc_type<scalar_t, true>;
transducer_loss_fused_backward<<<blocks, threads, 0, stream>>>(
x.data_ptr<scalar_t>(),
lossGrad.data_ptr<scalar_t>(),
audLen.data_ptr<int>(),
txtLen.data_ptr<int>(),
label.data_ptr<int>(),
alpha.data_ptr<acc_t>(),
beta.data_ptr<acc_t>(),
batchOffsetPtr,
dictSize,
blankIdx,
maxFLen,
maxGLen,
packedInput,
xGrad.data_ptr<scalar_t>());
}));
}
else{
        // For the non-fused kernel, the gradients that need to be written are very sparse, hence
        // initialize the tensor with all zeros.
xGrad = torch::zeros_like(x);
// don't launch more threads than needed.
const int threads = std::min(maxThreadPerBlock, maxGLen);
const dim3 blocks(maxFLen, batchSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(dtype, "transducer_loss_cuda_backward", ([&] {
using acc_t = at::acc_type<scalar_t, true>;
transducer_loss_backward<<<blocks, threads, 0, stream>>>(
x.data_ptr<scalar_t>(),
lossGrad.data_ptr<scalar_t>(),
audLen.data_ptr<int>(),
txtLen.data_ptr<int>(),
label.data_ptr<int>(),
alpha.data_ptr<acc_t>(),
beta.data_ptr<acc_t>(),
batchOffsetPtr,
dictSize,
blankIdx,
maxFLen,
maxGLen,
packedInput,
xGrad.data_ptr<scalar_t>());
}));
}
THCudaCheck(cudaGetLastError());
return xGrad;
}
namespace fastertransformer {
template <typename T>
__global__ void transpose_cache_batch_major(T* k_dst,
T* v_dst,
const float* k_src,
const float* v_src,
const int* memory_seq_len,
const int head_num,
const int size_per_head,
const int memory_max_seq_len,
const int cache_max_len) {
const int hidden_dim = head_num * size_per_head;
const int x = (sizeof(T) == 4) ? 4 : 8;
const int size_per_head_split = size_per_head / x;
const int batch_id = blockIdx.x;
const int seq_id = blockIdx.y;
for (int id = threadIdx.x; id < head_num * size_per_head_split * x;
id += blockDim.x) {
int tmp_id = id;
int x_id = tmp_id % x;
tmp_id = (tmp_id - x_id) / x;
int size_id = tmp_id % size_per_head_split;
tmp_id = (tmp_id - size_id) / size_per_head_split;
int head_id = tmp_id % head_num;
int src_seq_id =
(seq_id < memory_seq_len[batch_id])
? (seq_id + memory_max_seq_len - memory_seq_len[batch_id])
: (seq_id - memory_seq_len[batch_id]);
// key: [B, head_num, L, size_per_head / x, x] -> [B, head_num,
// size_per_head / x, L, x]
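    // x elements of T span 16 bytes (4 floats or 8 halves); the transposed [..., L, x] layout
    // presumably keeps these 16-byte chunks contiguous for vectorized reads at decode time.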
k_dst[batch_id * hidden_dim * cache_max_len +
head_id * size_per_head * cache_max_len +
size_id * cache_max_len * x + seq_id * x + x_id] =
(T)k_src[batch_id * hidden_dim * memory_max_seq_len +
head_id * size_per_head * memory_max_seq_len +
src_seq_id * size_per_head + size_id * x + x_id];
// value: [B, head_num, L, size_per_head/x, x] -> [B, head_num, L,
// size_per_head/x, x]
v_dst[batch_id * hidden_dim * cache_max_len +
head_id * size_per_head * cache_max_len + seq_id * size_per_head +
size_id * x + x_id] =
(T)v_src[batch_id * hidden_dim * memory_max_seq_len +
head_id * size_per_head * memory_max_seq_len +
src_seq_id * size_per_head + size_id * x + x_id];
}
}
template <typename T>
__global__ void self_attention_kernel(const int* memory_sequence_length,
T* key_buf,
T* value_buf,
T* query_buf,
const T* self_Q_bias,
T* key_cache,
const T* self_K_bias,
T* value_cache,
const T* self_V_bias,
T* context_buf,
int batch_size,
int head_num,
int size_per_head,
const int step,
const int memory_max_seq_len,
const T scalar) {
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T*>(s_buf);
T* logits = reinterpret_cast<T*>(&sq[size_per_head]);
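  // Dynamic SMEM layout: sq[0 .. size_per_head) followed by logits[0 .. step); the launcher
  // below passes shared_size = sizeof(T) * (size_per_head + step) accordingly.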
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if (tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
// offset for each step
int offset = batch_size * head_num * size_per_head;
for (int ite = 0; ite < step; ++ite) {
T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
// for the last step, we should update K + bias_K to the cache
if (ite == step - 1 && tid < size_per_head) {
key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
key_cache[ite * offset + qkv_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * (T)(scalar) : (T)(0.0f);
T qk = blockReduceSum(val);
if (threadIdx.x == 0) {
logits[ite] = qk;
}
__syncthreads(); // try to remove
}
__syncthreads(); // try to remove
__shared__ float s_max_val, s_sum;
float local_i = (tid >= (memory_max_seq_len - memory_sequence_length[bid]) &&
(tid < step))
? (float)logits[tid]
: -1e20f;
float max_val = blockReduceMax<float>(local_i);
if (tid == 0) s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = (tid >= (memory_max_seq_len - memory_sequence_length[bid]) &&
(tid < step))
? __expf(local_i)
: 0.0f;
float val = blockReduceSum<float>(local_o);
if (tid == 0) s_sum = val; // + 1e-6;
__syncthreads();
if (tid >= (memory_max_seq_len - memory_sequence_length[bid]) &&
(tid < step)) {
logits[tid] = local_o / s_sum;
} else if (tid < step) {
logits[tid] = static_cast<T>(0.0f);
}
__syncthreads();
if (tid < size_per_head) {
T sum = (T)0.0f;
for (int ite = 0; ite < step; ++ite) {
T value = value_cache[ite * offset + qkv_id];
// for the last step, we should update V + bias_V to the cache
if (ite == step - 1) {
value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
void self_attention_dispatch(const int* memory_sequence_length,
T* key_buf,
T* value_buf,
T* query_buf,
const T* self_Q_bias,
T* key_cache,
const T* self_K_bias,
T* value_cache,
const T* self_V_bias,
T* context_buf,
int batch_size,
int head_num,
int size_per_head,
const int step,
const int memory_max_seq_len,
cudaStream_t stream) {
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT) ? 1 : 0);
switch (cond) {
/*case 32:
masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;
case 64:
if(sizeof(T) == 2)
masked_attention_kernel_opt_half2<64, block_sz><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf,
batch_size, head_num, step, scalar);
break;
case 128:
if(sizeof(T) == 2)
masked_attention_kernel_opt_half2<128, block_sz><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz,
sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache,
self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;*/
default:
// default path
int block_size = 128;
// suppose size_per_head <= 128
if (step <= 64)
block_size = 64;
else if (step <= 128 && step > size_per_head)
block_size = 128;
else if (step > 128 && step <= 256)
block_size = 256;
else if (step > 256 && step <= 512)
block_size = 512;
else
block_size = 1024;
if ((int)block_size < size_per_head) {
block_size = size_per_head;
}
assert(block_size <= 1024);
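      // e.g. step = 300 with size_per_head = 64 selects block_size = 512; step = 40 selects 64;
      // block_size is then raised to size_per_head whenever the heuristic picks fewer threads.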
dim3 block(block_size);
T scalar = 1 / sqrtf(size_per_head * 1.0f);
int shared_size = sizeof(T) * (size_per_head + step);
self_attention_kernel<T><<<grid, block, shared_size, stream>>>(
memory_sequence_length,
key_buf,
value_buf,
query_buf,
self_Q_bias,
key_cache,
self_K_bias,
value_cache,
self_V_bias,
context_buf,
batch_size,
head_num,
size_per_head,
step,
memory_max_seq_len,
scalar);
#ifndef NDEBUG
cudaDeviceSynchronize();
check_cuda_error(cudaGetLastError());
#endif
}
}
template void self_attention_dispatch(const int* memory_sequence_length,
float* key_buf,
float* value_buf,
float* query_buf,
const float* self_Q_bias,
float* key_cache,
const float* self_K_bias,
float* value_cache,
const float* self_V_bias,
float* context_buf,
int batch_size,
int head_num,
int size_per_head,
const int step,
const int memory_max_seq_len,
cudaStream_t stream);
template void self_attention_dispatch(const int* memory_sequence_length,
half* key_buf,
half* value_buf,
half* query_buf,
const half* self_Q_bias,
half* key_cache,
const half* self_K_bias,
half* value_cache,
const half* self_V_bias,
half* context_buf,
int batch_size,
int head_num,
int size_per_head,
const int step,
const int memory_max_seq_len,
cudaStream_t stream);
template <typename T>
void transpose_cache_batch_major_kernelLauncher(T* k_dst,
T* v_dst,
const float* k_src,
const float* v_src,
const int* memory_seq_len,
const int local_batch_size,
const int memory_max_seq_len,
const int cache_max_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream) {
constexpr int block_sz = 128;
dim3 grid(local_batch_size, memory_max_seq_len);
transpose_cache_batch_major<<<grid, block_sz, 0, stream>>>(k_dst,
v_dst,
k_src,
v_src,
memory_seq_len,
local_head_num,
size_per_head,
memory_max_seq_len,
cache_max_len);
}
template void transpose_cache_batch_major_kernelLauncher(
float* k_dst,
float* v_dst,
const float* k_src,
const float* v_src,
const int* memory_seq_len,
const int local_batch_size,
const int memory_max_seq_len,
const int cache_max_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream);
template void transpose_cache_batch_major_kernelLauncher(
half* k_dst,
half* v_dst,
const float* k_src,
const float* v_src,
const int* memory_seq_len,
const int local_batch_size,
const int memory_max_seq_len,
const int cache_max_len,
const int size_per_head,
const int local_head_num,
cudaStream_t stream);
}
namespace nv {
template<typename T1, typename T2>
__global__ void gather(const size_t* d_idx,
const T1* d_x_in,
const T2* d_y_in,
T1* d_x_out,
T2* d_y_out,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
const size_t idx = d_idx[i];
d_x_out[i] = d_x_in[idx];
d_y_out[i] = d_y_in[idx];
}
}
template<typename T>
__global__ void gather(const size_t* d_idx,
const T* d_x_in,
T* d_x_out,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
const size_t idx = d_idx[i];
d_x_out[i] = d_x_in[idx];
}
}
template<typename T>
__global__ void scatter(const size_t* d_idx,
const T* d_x_in,
T* d_x_out,
size_t offset,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
const size_t t = offset + i;
const size_t idx = d_idx[t];
d_x_out[idx] = d_x_in[t];
}
}
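// A minimal launch sketch for the kernels above (mirrors how they are invoked further down,
// where BLOCK_SIZE_ == 256; buffer names here are placeholders):
//   const int block = 256;
//   const int grid  = (len - 1) / block + 1;
//   gather<<<grid, block, 0, stream>>>(d_idx, d_keys_in, d_vals_in, d_keys_out, d_vals_out, len);
//   scatter<<<grid, block, 0, stream>>>(d_idx, d_vals_in, d_vals_out, /*offset=*/0, len);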
template<typename KeyType, typename ValType>
struct Resource {
size_t* d_idx;
size_t* h_part_sizes;
size_t* h_offsets;
KeyType* d_keys_parted;
ValType* d_vals_parted;
bool* d_status_parted;
KeyType** d_remote_keys;
ValType** d_remote_vals;
bool** d_remote_status;
//cudaEvent_t* events;
//cudaStream_t* local_streams;
//cudaStream_t* remote_streams;
};
struct Stream_event_resource {
// CUDA events
cudaEvent_t* local_events; // Events associated with local streams
    cudaEvent_t* remote_events; // Events associated with remote streams
    // CUDA streams
    cudaStream_t* local_streams; // Local streams for the calling GPU to communicate with the other GPUs that contain the hashtable
    cudaStream_t* remote_streams; // Remote streams for the other GPUs to perform their tasks
    cudaStream_t* caller_stream; // Local stream for the calling GPU to perform its own task
};
template<typename KeyType, typename ValType, typename Allocator>
void create_local_resource(Resource<KeyType, ValType>& resource,
size_t len,
int num_part,
Allocator& allocator) {
allocator.malloc((void**) &resource.d_idx, len * sizeof(*resource.d_idx));
resource.h_part_sizes = new size_t[num_part];
resource.h_offsets = new size_t[num_part + 1];
allocator.malloc((void**) &resource.d_keys_parted, len * sizeof(*resource.d_keys_parted));
allocator.malloc((void**) &resource.d_vals_parted, len * sizeof(*resource.d_vals_parted));
allocator.malloc((void**) &resource.d_status_parted, len * sizeof(*resource.d_status_parted));
//resource.events = new cudaEvent_t[num_part];
//resource.local_streams = new cudaStream_t[num_part];
/*for (int i = 0; i < num_part; ++i) {
CUDA_CHECK(cudaStreamCreate(&resource.local_streams[i]));
CUDA_CHECK(cudaEventCreateWithFlags(&resource.events[i], cudaEventDisableTiming));
}*/
}
template<typename KeyType, typename ValType, typename Allocator>
void create_remote_resource(Resource<KeyType, ValType>& resource,
const std::vector<int>& gpu_id,
Allocator& allocator) {
const int num_part = gpu_id.size();
resource.d_remote_keys = new KeyType*[num_part];
resource.d_remote_vals = new ValType*[num_part];
resource.d_remote_status = new bool*[num_part];
//resource.remote_streams = new cudaStream_t[num_part];
CudaDeviceRestorer dev_restorer;
for (int i = 0; i < num_part; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id[i]));
//CUDA_CHECK(cudaStreamCreate(&resource.remote_streams[i]));
allocator.malloc((void**) &resource.d_remote_keys[i],
resource.h_part_sizes[i] * sizeof(**resource.d_remote_keys));
allocator.malloc((void**) &resource.d_remote_vals[i],
resource.h_part_sizes[i] * sizeof(**resource.d_remote_vals));
allocator.malloc((void**) &resource.d_remote_status[i],
resource.h_part_sizes[i] * sizeof(**resource.d_remote_status));
}
}
template<typename KeyType, typename ValType, typename Allocator>
void destroy_resource(Resource<KeyType, ValType>& resource,
const std::vector<int>& gpu_id,
Allocator& allocator) {
const int num_part = gpu_id.size();
{
CudaDeviceRestorer dev_restorer;
for (int i = 0; i < num_part; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id[i]));
allocator.free(resource.d_remote_keys[i]);
allocator.free(resource.d_remote_vals[i]);
allocator.free(resource.d_remote_status[i]);
//CUDA_CHECK(cudaStreamDestroy(resource.remote_streams[i]));
}
}
delete [] resource.d_remote_keys;
delete [] resource.d_remote_vals;
delete [] resource.d_remote_status;
//delete [] resource.remote_streams;
/*for (int i = 0; i < num_part; ++i) {
//CUDA_CHECK(cudaEventDestroy(resource.events[i]));
//CUDA_CHECK(cudaStreamDestroy(resource.local_streams[i]));
}*/
//delete [] resource.events;
//delete [] resource.local_streams;
allocator.free(resource.d_idx);
allocator.free(resource.d_keys_parted);
allocator.free(resource.d_vals_parted);
allocator.free(resource.d_status_parted);
delete [] resource.h_part_sizes;
delete [] resource.h_offsets;
}
template<typename KeyType, typename ValType, typename Allocator, typename KeyGPUMapPolicy_>
void create_resource_and_do_partition(const KeyType* d_keys,
Resource<KeyType, ValType>& resource,
size_t len,
const std::vector<int>& gpu_id,
cudaStream_t stream,
Allocator& allocator,
KeyGPUMapPolicy_& policy) {
const int num_gpu = gpu_id.size();
create_local_resource(resource,
len,
num_gpu,
allocator);
size_t* d_part_sizes;
allocator.malloc((void**) &d_part_sizes, num_gpu * sizeof(*d_part_sizes));
multisplit(d_keys,
resource.d_idx,
len,
d_part_sizes,
num_gpu,
stream,
allocator,
policy);
CUDA_CHECK(cudaStreamSynchronize(stream));
size_t* h_part_sizes = resource.h_part_sizes;
size_t* h_offsets = resource.h_offsets;
CUDA_CHECK(cudaMemcpy(h_part_sizes,
d_part_sizes,
num_gpu * sizeof(*h_part_sizes),
cudaMemcpyDeviceToHost));
allocator.free(d_part_sizes);
memcpy(h_offsets + 1, h_part_sizes, num_gpu * sizeof(*h_offsets));
h_offsets[0] = 0;
for (int i = 1; i < num_gpu + 1; ++i) {
h_offsets[i] += h_offsets[i-1];
}
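  // e.g. with part sizes {3, 5, 2} the loop above yields offsets {0, 3, 8, 10}, i.e. an
  // exclusive prefix sum giving each GPU's slice [offsets[i], offsets[i+1]) of the
  // partitioned keys/values.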
create_remote_resource(resource,
gpu_id,
allocator);
}
template<typename T>
void send(const T* d_local,
T** d_remote,
const size_t* offsets,
const size_t* part_sizes,
cudaStream_t* local_streams,
const std::vector<int>& gpu_id) {
int dev_local = get_dev(d_local);
assert(dev_local >= 0);
const int num_gpu = gpu_id.size();
for (int i = 0; i < num_gpu; ++i) {
if (part_sizes[i] == 0) {
continue;
}
CUDA_CHECK(cudaMemcpyPeerAsync(d_remote[i],
gpu_id[i],
&d_local[offsets[i]],
dev_local,
part_sizes[i] * sizeof(T),
local_streams[i]
));
}
}
template<typename T>
void receive(T* d_local,
const T* const * d_remote,
const size_t* offsets,
const size_t* part_sizes,
cudaStream_t* remote_streams,
const std::vector<int>& gpu_id) {
CudaDeviceRestorer dev_restorer;
int dev_local = get_dev(d_local);
assert(dev_local >= 0);
const int num_gpu = gpu_id.size();
for (int i = 0; i < num_gpu; ++i) {
if (part_sizes[i] == 0) {
continue;
}
CUDA_CHECK(cudaSetDevice(gpu_id[i]));
CUDA_CHECK(cudaMemcpyPeerAsync(&d_local[offsets[i]],
dev_local,
d_remote[i],
gpu_id[i],
part_sizes[i] * sizeof(T),
remote_streams[i]
));
}
}
void remote_wait_local_streams(cudaEvent_t* events,
cudaStream_t* local_streams,
cudaStream_t* remote_streams,
int num_gpu) {
for (int i = 0; i < num_gpu; ++i) {
CUDA_CHECK(cudaEventRecord(events[i], local_streams[i]));
CUDA_CHECK(cudaStreamWaitEvent(remote_streams[i], events[i], 0));
}
}
void sync_remotes(cudaStream_t* remote_streams, const std::vector<int>& gpu_id) {
// CudaDeviceRestorer dev_restorer;
const int num_gpu = gpu_id.size();
for (int i = 0; i < num_gpu; ++i) {
// CUDA_CHECK(cudaSetDevice(gpu_id[i]));
CUDA_CHECK(cudaStreamSynchronize(remote_streams[i]));
}
}
template<typename KeyType,
typename ValType,
typename KeyGPUMapPolicy_,
KeyType empty_key = std::numeric_limits<KeyType>::max()>
class MultiGpuHashTable {
public:
MultiGpuHashTable(size_t capacity, const int* gpu_id, int gpu_id_len);
~MultiGpuHashTable();
MultiGpuHashTable(const MultiGpuHashTable&) = delete;
MultiGpuHashTable& operator=(const MultiGpuHashTable&) = delete;
void insert(const KeyType* d_keys, const ValType* d_vals, size_t len, Stream_event_resource& s_e_resource) {
insert_or_set_helper_(d_keys, d_vals, len, &Table_::insert, s_e_resource);
}
void set(const KeyType* d_keys, const ValType* d_vals, size_t len, Stream_event_resource& s_e_resource) {
insert_or_set_helper_(d_keys, d_vals, len, &Table_::set, s_e_resource);
}
void get(const KeyType* d_keys, ValType* d_vals, bool* d_status, size_t len, Stream_event_resource& s_e_resource) const;
void insert_from_cpu(int gpu_id, const KeyType* h_key,
const ValType* h_val, size_t len, const size_t buffer_len,
const int buffer_count=2);
size_t get_size(int gpu_id) const;
void dump_to_cpu(int gpu_id, KeyType* h_key,
ValType* h_val, size_t len, const size_t buffer_len,
const int buffer_count=2) const;
void dump_to_gpu(int gpu_id, KeyType* d_key,
ValType* d_val, size_t len) const;
//void accum(const KeyType* d_keys, const ValType* d_vals, size_t len, Stream_event_resource& s_e_resource);
Stream_event_resource stream_event_resource_create(int gpu_id) const;
void stream_event_resource_destroy(Stream_event_resource& resource) const;
template<typename GradType, typename Optimizer>
void update(const KeyType* d_keys, const GradType* d_gradient, size_t len, Optimizer& op, Stream_event_resource& s_e_resource);
void clear(Stream_event_resource& s_e_resource);
void remove(const KeyType* d_keys, size_t len, Stream_event_resource& s_e_resource);
private:
static const int BLOCK_SIZE_ = 256;
using Table_ = nv::HashTable<KeyType, ValType, empty_key>;
using TableFunction_ = void (Table_::*)(const KeyType* d_keys, const ValType* d_vals, size_t len, cudaStream_t stream);
void insert_or_set_helper_(const KeyType* d_keys, const ValType* d_vals, size_t len, TableFunction_ func, Stream_event_resource& s_e_resource);
const int num_gpu_;
std::vector<int> gpu_id_;
std::vector<Table_*> tables_;
mutable nv::CubAllocator allocator_;
KeyGPUMapPolicy_ KeyGPUMapPolicy;
//The Global Hashtable lock for update APIs
std::mutex update_mtx_;
};
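/* A rough usage sketch of the class above (GPU ids, capacity, and the policy type are
   illustrative only; SomeKeyGPUMapPolicy stands in for whatever KeyGPUMapPolicy_ the
   surrounding project defines):
     std::vector<int> gpus = {0, 1};
     MultiGpuHashTable<long long, float, SomeKeyGPUMapPolicy> table(1 << 20, gpus.data(), gpus.size());
     Stream_event_resource res = table.stream_event_resource_create(0);
     table.insert(d_keys, d_vals, n, res);              // d_keys / d_vals live on GPU 0
     table.get(d_keys, d_vals_out, d_status, n, res);
     table.stream_event_resource_destroy(res);
*/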
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
Stream_event_resource MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::stream_event_resource_create(int gpu_id) const{
/* Check for any invalid input */
assert(gpu_id >= 0);
    /* We do not check whether the GPU is in the GPU list; the user may want to call get/set/insert/accum on a non-hashtable GPU */
    /* Users need to be cautious and make sure the GPU ID is the one they intend */
/* Save Current device */
CudaDeviceRestorer dev_restorer;
/* Set up GPU */
CUDA_CHECK(cudaSetDevice(gpu_id));
/* The resource that will be returned */
Stream_event_resource resource;
    /* How many streams and events we need to create */
assert(gpu_id_.size() == num_gpu_);
const int num_gpu = gpu_id_.size();
/* Create local streams */
resource.local_streams = new cudaStream_t[num_gpu];
for (int i = 0; i < num_gpu; ++i) {
CUDA_CHECK(cudaStreamCreate(&resource.local_streams[i]));
}
/* Create remote streams */
resource.remote_streams = new cudaStream_t[num_gpu];
for (int i = 0; i < num_gpu; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id_[i]));
CUDA_CHECK(cudaStreamCreate(&resource.remote_streams[i]));
}
/* Reset current device */
CUDA_CHECK(cudaSetDevice(gpu_id));
/* Create caller stream */
resource.caller_stream = new cudaStream_t[1];
CUDA_CHECK(cudaStreamCreate(&resource.caller_stream[0]));
/* Create local event */
resource.local_events = new cudaEvent_t[num_gpu];
for (int i = 0; i < num_gpu; ++i) {
CUDA_CHECK(cudaEventCreateWithFlags(&resource.local_events[i], cudaEventDisableTiming));
}
/* Create remote event */
resource.remote_events = new cudaEvent_t[num_gpu];
for (int i = 0; i < num_gpu; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id_[i]));
CUDA_CHECK(cudaEventCreateWithFlags(&resource.remote_events[i], cudaEventDisableTiming));
}
return resource;
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::stream_event_resource_destroy(Stream_event_resource& resource) const{
    /* How many streams and events we need to destroy */
const int num_gpu = gpu_id_.size();
/* Destroy local streams */
for (int i = 0; i < num_gpu; ++i) {
CUDA_CHECK(cudaStreamDestroy(resource.local_streams[i]));
}
delete [] resource.local_streams;
/* Destroy remote streams */
for (int i = 0; i < num_gpu; ++i) {
CUDA_CHECK(cudaStreamDestroy(resource.remote_streams[i]));
}
delete [] resource.remote_streams;
/* Destroy caller stream */
CUDA_CHECK(cudaStreamDestroy(resource.caller_stream[0]));
delete [] resource.caller_stream;
/* Destroy local event */
for (int i = 0; i < num_gpu; ++i) {
CUDA_CHECK(cudaEventDestroy(resource.local_events[i]));
}
delete [] resource.local_events;
/* Destroy remote event */
for (int i = 0; i < num_gpu; ++i) {
CUDA_CHECK(cudaEventDestroy(resource.remote_events[i]));
}
delete [] resource.remote_events;
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::insert_from_cpu(int gpu_id, const KeyType* h_key,
const ValType* h_val, size_t len, const size_t buffer_len,
const int buffer_count){
/* Check for any invalid input*/
if(len <= 0){
return;
}
assert(buffer_count >= 1);
assert(gpu_id >= 0);
assert(buffer_len >= 1);
    /* Calculate which table to use (i.e. the index of "gpu_id" in the gpu_id_ vector) */
std::vector <int>::iterator iElement = std::find(gpu_id_.begin(),gpu_id_.end(),gpu_id);
assert(iElement != gpu_id_.end());
int table_index = std::distance(gpu_id_.begin(),iElement);
/* Save Current device */
CudaDeviceRestorer dev_restorer;
/* Set up GPU and allocate resources*/
CUDA_CHECK(cudaSetDevice(gpu_id));
cudaStream_t streams[buffer_count];
for(int i=0 ; i < buffer_count; i++){
CUDA_CHECK(cudaStreamCreate(&(streams[i])));
}
KeyType* d_temp_key[buffer_count];
ValType* d_temp_val[buffer_count];
for(int i=0 ; i < buffer_count ; i++){
CUDA_CHECK(cudaMallocManaged( (void**) &(d_temp_key[i]), sizeof(*h_key) * buffer_len));
CUDA_CHECK(cudaMallocManaged( (void**) &(d_temp_val[i]), sizeof(*h_val) * buffer_len));
}
/* Counters recording how much we have done*/
size_t len_counter=0;
int pipeline_counter=0;
    /* Assign tasks to different pipelines, until all <K,V> are inserted */
while(len_counter < len){
int current_stream = pipeline_counter % buffer_count;
int copy_len = (len_counter+buffer_len > len ? len-len_counter : buffer_len);
CUDA_CHECK(cudaMemcpyAsync(d_temp_key[current_stream], h_key + len_counter, sizeof(*h_key) * copy_len ,cudaMemcpyHostToDevice , streams[current_stream]));
CUDA_CHECK(cudaMemcpyAsync(d_temp_val[current_stream], h_val + len_counter, sizeof(*h_val) * copy_len ,cudaMemcpyHostToDevice , streams[current_stream]));
tables_[table_index]->insert(d_temp_key[current_stream], d_temp_val[current_stream], copy_len, streams[current_stream]);
pipeline_counter++;
len_counter+=copy_len;
}
/* Waiting on all tasks to finish*/
for(int i=0 ; i < buffer_count; i++){
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
/* Finished tasks and clean up*/
for(int i=0 ; i < buffer_count; i++){
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
for(int i=0 ; i < buffer_count ; i++){
CUDA_CHECK(cudaFree(d_temp_key[i]));
CUDA_CHECK(cudaFree(d_temp_val[i]));
}
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
size_t MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::get_size(int gpu_id) const{
/* Check for any invalid input */
assert(gpu_id >= 0);
    /* Calculate which table to use (i.e. the index of "gpu_id" in the gpu_id_ vector) */
std::vector <int>::const_iterator iElement = std::find(gpu_id_.begin(),gpu_id_.end(),gpu_id);
assert(iElement != gpu_id_.end());
int table_index = std::distance(gpu_id_.begin(),iElement);
/* Save Current device */
CudaDeviceRestorer dev_restorer;
/* Set up GPU and allocate resources*/
CUDA_CHECK(cudaSetDevice(gpu_id));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
    /* Calculate the actual size of the hash table on this GPU */
size_t hash_table_size;
hash_table_size = tables_[table_index]-> get_size(stream);
/* Finished tasks and clean up */
CUDA_CHECK(cudaStreamDestroy(stream));
return hash_table_size;
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::dump_to_cpu(int gpu_id, KeyType* h_key,
ValType* h_val, size_t len, const size_t buffer_len,
const int buffer_count) const{
/* Check for any invalid input*/
if(len <= 0){
return;
}
assert(buffer_count >= 1);
assert(gpu_id >= 0);
assert(buffer_len >= 1);
    /* Calculate which table to use (i.e. the index of "gpu_id" in the gpu_id_ vector) */
std::vector <int>::const_iterator iElement = std::find(gpu_id_.begin(),gpu_id_.end(),gpu_id);
assert(iElement != gpu_id_.end());
int table_index = std::distance(gpu_id_.begin(),iElement);
/* Save Current device */
CudaDeviceRestorer dev_restorer;
/* Set up GPU and allocate resources*/
CUDA_CHECK(cudaSetDevice(gpu_id));
cudaStream_t streams[buffer_count];
cudaEvent_t Events[buffer_count];
size_t * d_dump_counter[buffer_count];
size_t * h_dump_counter;
size_t h_ptr = 0;
for(int i=0 ; i < buffer_count; i++){
CUDA_CHECK(cudaStreamCreate(&(streams[i])));
}
for(int i=0 ; i < buffer_count; i++){
CUDA_CHECK(cudaEventCreate(&(Events[i])));
}
h_dump_counter = (size_t *) malloc(sizeof(size_t) * buffer_count);
KeyType* d_temp_key[buffer_count];
ValType* d_temp_val[buffer_count];
for(int i=0 ; i < buffer_count ; i++){
CUDA_CHECK(cudaMallocManaged( (void**) &(d_temp_key[i]), sizeof(*h_key) * buffer_len));
CUDA_CHECK(cudaMallocManaged( (void**) &(d_temp_val[i]), sizeof(*h_val) * buffer_len));
CUDA_CHECK(cudaMallocManaged( (void**) &(d_dump_counter[i]), sizeof(size_t)));
}
/* Counters recording how much we have done*/
const size_t table_capacity = tables_[table_index] -> get_capacity(); // The Actual capacity of hashtable on gpu_id.
size_t len_counter = 0; // How much of the hash table we have processed
    /* Assign tasks to different pipelines, until the whole hashtable is processed */
while(len_counter < table_capacity){
size_t valid_stream = 0;
int pipeline_counter = 0;
while(len_counter < table_capacity && pipeline_counter < buffer_count){
int current_stream = pipeline_counter % buffer_count;
int search_length = (len_counter + buffer_len > table_capacity ? table_capacity-len_counter : buffer_len);
tables_[table_index]->dump(d_temp_key[current_stream], d_temp_val[current_stream],
len_counter, search_length, d_dump_counter[current_stream], streams[current_stream]);
CUDA_CHECK(cudaMemcpyAsync(&(h_dump_counter[current_stream]), d_dump_counter[current_stream],
sizeof(size_t), cudaMemcpyDeviceToHost, streams[current_stream]));
CUDA_CHECK(cudaEventRecord( Events[current_stream], streams[current_stream]));
len_counter += search_length;
pipeline_counter++ ;
valid_stream++ ;
}
pipeline_counter = 0;
while(valid_stream > 0){
int current_stream = pipeline_counter % buffer_count;
CUDA_CHECK(cudaEventSynchronize(Events[current_stream]));
CUDA_CHECK(cudaMemcpyAsync(h_key + h_ptr, d_temp_key[current_stream], sizeof(*h_key) * h_dump_counter[current_stream],
cudaMemcpyDeviceToHost, streams[current_stream]));
CUDA_CHECK(cudaMemcpyAsync(h_val + h_ptr, d_temp_val[current_stream], sizeof(*h_val) * h_dump_counter[current_stream],
cudaMemcpyDeviceToHost, streams[current_stream]));
h_ptr += h_dump_counter[current_stream];
valid_stream-- ;
pipeline_counter++ ;
}
}
// Double check with get_size output
assert(h_ptr == len);
/* Waiting on all tasks to finish*/
for(int i=0 ; i < buffer_count; i++){
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
/* Finished tasks and clean up*/
for(int i=0 ; i < buffer_count; i++){
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
for(int i=0 ; i < buffer_count; i++){
CUDA_CHECK(cudaEventDestroy(Events[i]));
}
for(int i=0 ; i < buffer_count ; i++){
CUDA_CHECK(cudaFree(d_temp_key[i]));
CUDA_CHECK(cudaFree(d_temp_val[i]));
CUDA_CHECK(cudaFree(d_dump_counter[i]));
}
free((void*) h_dump_counter);
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::dump_to_gpu(int gpu_id,
KeyType* d_key,
ValType* d_val,
size_t len) const{
/* Check for any invalid input*/
if(len <= 0){
return;
}
assert(gpu_id >= 0);
    /* Calculate which table to use (i.e. the index of "gpu_id" in the gpu_id_ vector) */
std::vector <int>::const_iterator iElement = std::find(gpu_id_.begin(),gpu_id_.end(),gpu_id);
assert(iElement != gpu_id_.end());
int table_index = std::distance(gpu_id_.begin(),iElement);
    /* Make sure the d_key and d_val buffers are on the same device */
assert(get_dev(d_key) == get_dev(d_val));
    /* Make sure the GPU buffer provided by the user is on the same device as the requested GPU / hashtable */
assert(get_dev(d_key) == gpu_id);
/* Save Current device */
CudaDeviceRestorer dev_restorer;
/* Set to GPU and allocate resource */
CUDA_CHECK(cudaSetDevice(gpu_id));
cudaStream_t stream;
size_t * d_dump_counter;
size_t h_dump_counter;
CUDA_CHECK(cudaStreamCreate(&stream));
CUDA_CHECK(cudaMallocManaged( (void**) &(d_dump_counter), sizeof(size_t)));
// The Actual capacity of hashtable on gpu_id, NOT the size.
const size_t table_capacity = tables_[table_index] -> get_capacity();
/* Dump the hashtable on the required GPU to the buffer provided */
tables_[table_index]->dump(d_key, d_val, 0, table_capacity, d_dump_counter, stream);
CUDA_CHECK(cudaMemcpyAsync(&h_dump_counter, d_dump_counter,
sizeof(size_t), cudaMemcpyDeviceToHost, stream));
/* Waiting on all tasks to finish*/
CUDA_CHECK(cudaStreamSynchronize(stream));
// Double check with get_size output
assert(h_dump_counter == len);
/* Finished tasks and clean up*/
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(d_dump_counter));
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::MultiGpuHashTable(size_t capacity, const int* gpu_id, int gpu_id_len)
: num_gpu_(gpu_id_len), gpu_id_(gpu_id, gpu_id + gpu_id_len), allocator_(gpu_id, gpu_id_len) {
assert(gpu_id_len > 0);
for (auto gpu : gpu_id_) {
assert(gpu >= 0);
}
assert(num_gpu_ == gpu_id_.size());
CudaDeviceRestorer dev_restorer;
for (int i = 0; i < gpu_id_len; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id[i]));
auto table = new Table_(capacity / gpu_id_len + 1);
tables_.push_back(table);
}
for (int cur = 0; cur < gpu_id_len; ++cur) {
CUDA_CHECK(cudaSetDevice(gpu_id[cur]));
for (int peer = 0; peer < gpu_id_len; ++peer) {
if (cur == peer) {
continue;
}
int can_access;
CUDA_CHECK(cudaDeviceCanAccessPeer(&can_access, gpu_id[cur], gpu_id[peer]));
if (can_access) {
cudaError_t ret = cudaDeviceEnablePeerAccess(gpu_id[peer], 0);
if (ret == cudaErrorPeerAccessAlreadyEnabled
&& cudaPeekAtLastError() == cudaErrorPeerAccessAlreadyEnabled) {
cudaGetLastError();
}
else {
CUDA_CHECK(ret);
}
}
}
}
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::~MultiGpuHashTable() {
for (auto& table : tables_) {
delete table;
table = nullptr;
}
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::insert_or_set_helper_(const KeyType* d_keys,
const ValType* d_vals,
size_t len,
TableFunction_ func,
Stream_event_resource& s_e_resource) {
if (len == 0) {
return;
}
CudaDeviceRestorer dev_restorer;
assert(get_dev(d_keys) == get_dev(d_vals));
switch_to_dev(d_keys);
//cudaStream_t caller_stream;
//CUDA_CHECK(cudaStreamCreate(&caller_stream));
Resource<KeyType, ValType> resource;
create_resource_and_do_partition(d_keys,
resource,
len,
gpu_id_,
s_e_resource.caller_stream[0],
allocator_,
KeyGPUMapPolicy);
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
gather<<<grid_size, BLOCK_SIZE_, 0, s_e_resource.caller_stream[0]>>>(resource.d_idx,
d_keys,
d_vals,
resource.d_keys_parted,
resource.d_vals_parted,
len);
CUDA_CHECK(cudaStreamSynchronize(s_e_resource.caller_stream[0]));
send(resource.d_keys_parted,
resource.d_remote_keys,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.local_streams,
gpu_id_);
send(resource.d_vals_parted,
resource.d_remote_vals,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.local_streams,
gpu_id_);
remote_wait_local_streams(s_e_resource.local_events,
s_e_resource.local_streams,
s_e_resource.remote_streams,
num_gpu_);
{
CudaDeviceRestorer dev_restorer;
for (int i = 0; i < num_gpu_; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id_[i]));
(tables_[i]->*func)(resource.d_remote_keys[i],
resource.d_remote_vals[i],
resource.h_part_sizes[i],
s_e_resource.remote_streams[i]);
}
}
sync_remotes(s_e_resource.remote_streams, gpu_id_);
destroy_resource(resource, gpu_id_, allocator_);
//CUDA_CHECK(cudaStreamDestroy(caller_stream));
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::get(const KeyType* d_keys, ValType* d_vals, bool* d_status,
size_t len, Stream_event_resource& s_e_resource) const {
if (len == 0) {
return;
}
CudaDeviceRestorer dev_restorer;
assert(get_dev(d_keys) == get_dev(d_vals));
assert(get_dev(d_keys) == get_dev(d_status));
switch_to_dev(d_keys);
//cudaStream_t caller_stream;
//CUDA_CHECK(cudaStreamCreate(&caller_stream));
Resource<KeyType, ValType> resource;
create_resource_and_do_partition(d_keys,
resource,
len,
gpu_id_,
s_e_resource.caller_stream[0],
allocator_,
KeyGPUMapPolicy);
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
gather<<<grid_size, BLOCK_SIZE_, 0, s_e_resource.caller_stream[0]>>>(resource.d_idx,
d_keys,
resource.d_keys_parted,
len);
CUDA_CHECK(cudaStreamSynchronize(s_e_resource.caller_stream[0]));
send(resource.d_keys_parted,
resource.d_remote_keys,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.local_streams,
gpu_id_);
remote_wait_local_streams(s_e_resource.local_events,
s_e_resource.local_streams,
s_e_resource.remote_streams,
num_gpu_);
{
CudaDeviceRestorer dev_restorer;
for (int i = 0; i < num_gpu_; ++i) {
while(!(tables_[i]->get_lock())){
std::this_thread::sleep_for(std::chrono::milliseconds(2));
}
CUDA_CHECK(cudaSetDevice(gpu_id_[i]));
tables_[i]->get(resource.d_remote_keys[i],
resource.d_remote_vals[i],
resource.d_remote_status[i],
resource.h_part_sizes[i],
s_e_resource.remote_streams[i]);
}
}
//sync_remotes(s_e_resource.remote_streams, gpu_id_);
for (int i = 0; i < num_gpu_; ++i) {
// CUDA_CHECK(cudaSetDevice(gpu_id[i]));
CUDA_CHECK(cudaStreamSynchronize(s_e_resource.remote_streams[i]));
tables_[i]->get_release();
}
receive(resource.d_vals_parted,
resource.d_remote_vals,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.remote_streams,
gpu_id_);
// Also receive status
receive(resource.d_status_parted,
resource.d_remote_status,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.remote_streams,
gpu_id_);
//cudaEvent_t* remote_events = new cudaEvent_t[num_gpu_];
{
CudaDeviceRestorer dev_restorer;
for (int i = 0; i < num_gpu_; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id_[i]));
//CUDA_CHECK(cudaEventCreateWithFlags(&remote_events[i], cudaEventDisableTiming));
CUDA_CHECK(cudaEventRecord(s_e_resource.remote_events[i], s_e_resource.remote_streams[i]));
}
}
for (int i = 0; i < num_gpu_; ++i) {
CUDA_CHECK(cudaStreamWaitEvent(s_e_resource.local_streams[i], s_e_resource.remote_events[i], 0));
        if(resource.h_part_sizes[i] <= 0){ // Important: h_part_sizes[i] is size_t (unsigned 64-bit) and can never be negative; skip empty partitions
continue;
}
int tmp_grid_size = (resource.h_part_sizes[i] - 1) / BLOCK_SIZE_ + 1;
scatter<<<tmp_grid_size, BLOCK_SIZE_, 0, s_e_resource.local_streams[i]>>>(
resource.d_idx,
resource.d_vals_parted,
d_vals,
resource.h_offsets[i],
resource.h_part_sizes[i]);
//Also scatter the status
scatter<<<tmp_grid_size, BLOCK_SIZE_, 0, s_e_resource.local_streams[i]>>>(
resource.d_idx,
resource.d_status_parted,
d_status,
resource.h_offsets[i],
resource.h_part_sizes[i]);
}
for (int i = 0; i < num_gpu_; ++i) {
CUDA_CHECK(cudaStreamSynchronize(s_e_resource.local_streams[i]));
}
/*for (int i = 0; i < num_gpu_; ++i) {
CUDA_CHECK(cudaEventDestroy(remote_events[i]));
}*/
//delete [] remote_events;
//CUDA_CHECK(cudaStreamDestroy(caller_stream));
destroy_resource(resource, gpu_id_, allocator_);
}
/*template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::accum(const KeyType* d_keys, const ValType* d_vals,
size_t len, Stream_event_resource& s_e_resource){
if (len == 0) {
return;
}
CudaDeviceRestorer dev_restorer;
assert(get_dev(d_keys) == get_dev(d_vals));
switch_to_dev(d_keys);
//cudaStream_t caller_stream;
//CUDA_CHECK(cudaStreamCreate(&caller_stream));
Resource<KeyType, ValType> resource;
create_resource_and_do_partition(d_keys,
resource,
len,
gpu_id_,
s_e_resource.caller_stream[0],
allocator_,
KeyGPUMapPolicy);
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
gather<<<grid_size, BLOCK_SIZE_, 0, s_e_resource.caller_stream[0]>>>(resource.d_idx,
d_keys,
d_vals,
resource.d_keys_parted,
resource.d_vals_parted,
len);
CUDA_CHECK(cudaStreamSynchronize(s_e_resource.caller_stream[0]));
send(resource.d_keys_parted,
resource.d_remote_keys,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.local_streams,
gpu_id_);
send(resource.d_vals_parted,
resource.d_remote_vals,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.local_streams,
gpu_id_);
remote_wait_local_streams(s_e_resource.local_events,
s_e_resource.local_streams,
s_e_resource.remote_streams,
num_gpu_);
{
CudaDeviceRestorer dev_restorer;
for (int i = 0; i < num_gpu_; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id_[i]));
tables_[i]->accum(resource.d_remote_keys[i],
resource.d_remote_vals[i],
resource.h_part_sizes[i],
s_e_resource.remote_streams[i]);
}
}
sync_remotes(s_e_resource.remote_streams, gpu_id_);
destroy_resource(resource, gpu_id_, allocator_);
//CUDA_CHECK(cudaStreamDestroy(caller_stream));
}*/
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
template<typename GradType, typename Optimizer>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::update(const KeyType* d_keys, const GradType* d_gradient,
size_t len, Optimizer& op,
Stream_event_resource& s_e_resource){
if (len == 0) {
return;
}
CudaDeviceRestorer dev_restorer;
assert(get_dev(d_keys) == get_dev(d_gradient));
switch_to_dev(d_keys);
//Here, we need to lock the global update lock!
update_mtx_.lock();
//cudaStream_t caller_stream;
//CUDA_CHECK(cudaStreamCreate(&caller_stream));
Resource<KeyType, GradType> resource;
create_resource_and_do_partition(d_keys,
resource,
len,
gpu_id_,
s_e_resource.caller_stream[0],
allocator_,
KeyGPUMapPolicy);
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
gather<<<grid_size, BLOCK_SIZE_, 0, s_e_resource.caller_stream[0]>>>(resource.d_idx,
d_keys,
d_gradient,
resource.d_keys_parted,
resource.d_vals_parted,
len);
CUDA_CHECK(cudaStreamSynchronize(s_e_resource.caller_stream[0]));
send(resource.d_keys_parted,
resource.d_remote_keys,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.local_streams,
gpu_id_);
send(resource.d_vals_parted,
resource.d_remote_vals,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.local_streams,
gpu_id_);
remote_wait_local_streams(s_e_resource.local_events,
s_e_resource.local_streams,
s_e_resource.remote_streams,
num_gpu_);
{
CudaDeviceRestorer dev_restorer;
for (int i = 0; i < num_gpu_; ++i) {
while(!(tables_[i]->update_lock())){
std::this_thread::sleep_for(std::chrono::milliseconds(2));
}
CUDA_CHECK(cudaSetDevice(gpu_id_[i]));
tables_[i]->update(resource.d_remote_keys[i],
resource.d_remote_vals[i],
resource.h_part_sizes[i],
s_e_resource.remote_streams[i],
op);
}
}
//sync_remotes(s_e_resource.remote_streams, gpu_id_);
for (int i = 0; i < num_gpu_; ++i) {
// CUDA_CHECK(cudaSetDevice(gpu_id[i]));
CUDA_CHECK(cudaStreamSynchronize(s_e_resource.remote_streams[i]));
tables_[i]->update_release();
}
// Here, we need to release global update lock!
update_mtx_.unlock();
destroy_resource(resource, gpu_id_, allocator_);
//CUDA_CHECK(cudaStreamDestroy(caller_stream));
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::clear(Stream_event_resource& s_e_resource){
// Recover the device setting
CudaDeviceRestorer dev_restorer;
// Clear all hashtables
for (int i = 0; i < num_gpu_; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id_[i]));
tables_[i]->clear(s_e_resource.remote_streams[i]);
}
// Wait for all clear kernel to finish
for (int i = 0; i < num_gpu_; ++i) {
CUDA_CHECK(cudaStreamSynchronize(s_e_resource.remote_streams[i]));
}
}
template<typename KeyType, typename ValType, typename KeyGPUMapPolicy_, KeyType empty_key>
void MultiGpuHashTable<KeyType, ValType, KeyGPUMapPolicy_, empty_key>::remove(const KeyType* d_keys, size_t len, Stream_event_resource& s_e_resource){
if (len == 0) {
return;
}
CudaDeviceRestorer dev_restorer;
switch_to_dev(d_keys);
Resource<KeyType, ValType> resource;
create_resource_and_do_partition(d_keys,
resource,
len,
gpu_id_,
s_e_resource.caller_stream[0],
allocator_,
KeyGPUMapPolicy);
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
gather<<<grid_size, BLOCK_SIZE_, 0, s_e_resource.caller_stream[0]>>>(resource.d_idx,
d_keys,
resource.d_keys_parted,
len);
CUDA_CHECK(cudaStreamSynchronize(s_e_resource.caller_stream[0]));
send(resource.d_keys_parted,
resource.d_remote_keys,
resource.h_offsets,
resource.h_part_sizes,
s_e_resource.local_streams,
gpu_id_);
remote_wait_local_streams(s_e_resource.local_events,
s_e_resource.local_streams,
s_e_resource.remote_streams,
num_gpu_);
// Remove keys on each GPU
{
CudaDeviceRestorer dev_restorer;
for (int i = 0; i < num_gpu_; ++i) {
CUDA_CHECK(cudaSetDevice(gpu_id_[i]));
tables_[i]->remove(resource.d_remote_keys[i],
resource.h_part_sizes[i],
s_e_resource.remote_streams[i]);
}
}
// Wait for all GPU delete kernels to finish
sync_remotes(s_e_resource.remote_streams, gpu_id_);
// Destroy resources
destroy_resource(resource, gpu_id_, allocator_);
}
}
#endif
#include "k2/csrc/array_ops.h"
#include "k2/csrc/cub.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_utils.h"
#include "k2/csrc/ragged_ops.h"
namespace k2 {
void CheckLayerEqual(int32_t layer,
int32_t num_srcs,
RaggedShape **src) {
NVTX_RANGE(K2_FUNC);
if (num_srcs <= 1)
return;
K2_CHECK(layer >= 0 && layer + 1 < src[0]->NumAxes());
std::vector<const int32_t*> row_splits_data_vec;
row_splits_data_vec.reserve(num_srcs);
int32_t row_splits_dim = 0, row_ids_dim = 0;
for (int32_t s = 0; s < num_srcs; s++) {
// RowSplits(1) .. is the lowest numbered row-splits...
const int32_t *data = src[s]->RowSplits(layer + 1).Data();
if (s == 0 || data != row_splits_data_vec[0])
row_splits_data_vec.push_back(data);
if (s == 0) {
row_splits_dim = src[s]->TotSize(layer) + 1;
row_ids_dim = src[s]->TotSize(layer + 1);
} else {
K2_CHECK_EQ(row_splits_dim, src[s]->TotSize(layer) + 1);
K2_CHECK_EQ(row_ids_dim, src[s]->TotSize(layer + 1));
}
}
if (row_splits_data_vec.size() <= 1) {
// No point in checking because the row_splits all had the same address.
return;
}
ContextPtr &c = src[0]->Context();
#ifndef NDEBUG
Array1<int32_t> is_bad(c, 1, 0);
Array1<const int32_t*> row_splits_ptrs(c, row_splits_data_vec);
const int32_t **row_splits_ptrs_data = row_splits_ptrs.Data();
int32_t *is_bad_data = is_bad.Data();
K2_EVAL2(c, row_splits_ptrs.Dim() - 1,
row_splits_dim, lambda_check_row_splits,
(int32_t i, int32_t j) -> void {
if (row_splits_ptrs_data[i+1][j] !=
row_splits_ptrs_data[0][j])
is_bad_data[0] = 1;
});
if (is_bad[0] == 1) {
std::ostringstream arrays_os;
for (int32_t i = 0; i < num_srcs; i++)
arrays_os << "Shape " << i << " = " << *(src[i]) << "; ";
K2_LOG(FATAL) << "Shapes were expected to be equal: "
<< arrays_os.str();
}
#endif
}
RaggedShape IntersperseRaggedLayer(int32_t layer,
int32_t num_srcs,
RaggedShape **src,
Array1<uint32_t> *merge_map) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
K2_CHECK_GE(layer, 0);
K2_CHECK_LT(layer + 1, src[0]->NumAxes());
if (num_srcs == 1) {
if (merge_map)
*(reinterpret_cast<Array1<int32_t>*>(merge_map)) =
Range(src[0]->Context(), src[0]->TotSize(layer + 1), 0);
return *src[0];
}
std::vector<int32_t*> row_splits_ptrs_vec(num_srcs);
int32_t num_axes = src[0]->NumAxes(),
num_rows = src[0]->TotSize(layer),
tot_elems = 0;
for (int32_t i = 0; i < num_srcs; ++i) {
if (i > 0) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes);
K2_CHECK_EQ(src[i]->TotSize(layer), num_rows);
}
Array1<int32_t> &row_splits = src[i]->RowSplits(layer + 1);
tot_elems += src[i]->TotSize(layer + 1);
row_splits_ptrs_vec[i] = row_splits.Data();
}
ContextPtr &c = src[0]->Context();
int32_t new_num_rows = num_rows * num_srcs;
Array1<int32_t> row_ids(c, tot_elems),
row_splits(c, new_num_rows + 1);
int32_t *row_splits_data = row_splits.Data();
Array1<int32_t*> row_splits_ptrs(c, row_splits_ptrs_vec);
int32_t **row_splits_ptrs_data = row_splits_ptrs.Data();
if (c->GetDeviceType() == kCpu) {
int32_t row_splits_sum = 0;
row_splits_data[0] = 0;
for (int32_t i = 0; i < num_rows; i++) {
for (int32_t j = 0; j < num_srcs; j++) {
int32_t row_len = row_splits_ptrs_data[j][i+1] -
row_splits_ptrs_data[j][i];
row_splits_sum += row_len;
row_splits_data[i * num_srcs + j + 1] = row_splits_sum;
}
}
} else {
if (num_srcs <= 16) {
// If num_srcs is not too large, we do an optimization. Instead
// of computing the length of each row (as row_splits[i+1] -
// row_splits[i]) and doing exclusive-sum to get the row_splits, we sum up
// `num_srcs` row_splits numbered `i, i+1, .. i+num_srcs-1.`
// (These numberings map to source i % num_srcs at position i / num_srcs).
// This gives us the same answer, with less latency.
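      // Worked example: num_srcs = 2, row_splits A = [0, 2, 5], B = [0, 1, 3]. Summing pairs of
      // consecutive numberings gives [0+0, 0+2, 2+1, 1+5, 5+3] = [0, 2, 3, 6, 8], i.e. the
      // interleaved row lengths [2, 1, 3, 2] already prefix-summed.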
auto lambda_get_row_splits = [=] __device__(int32_t i) -> void {
int32_t sum = 0;
for (int32_t j = i; j < i + num_srcs; j++) {
int32_t src = j % num_srcs,
pos = j / num_srcs;
int32_t this_row_split = row_splits_ptrs_data[src][pos];
sum += this_row_split;
}
row_splits_data[i] = sum;
};
EvalDevice(c, new_num_rows + 1, lambda_get_row_splits);
} else {
// Set the row_splits initially to the sizes, then do exclusive-sum.
auto lambda_get_sizes = [=] __device__(int32_t i) -> void {
int32_t src = i % num_srcs, pos = i / num_srcs;
int32_t this_size = row_splits_ptrs_data[src][pos + 1] -
row_splits_ptrs_data[src][pos];
row_splits_data[i] = this_size;
};
EvalDevice(c, new_num_rows, lambda_get_sizes);
ExclusiveSum(row_splits, &row_splits);
}
}
RowSplitsToRowIds(row_splits, &row_ids);
if (merge_map != nullptr) {
*merge_map = Array1<uint32_t>(c, tot_elems);
const int32_t *row_ids_data = row_ids.Data();
uint32_t *merge_map_data = merge_map->Data();
K2_EVAL(c, tot_elems, lambda_set_merge_map, (int32_t idx01) -> void {
int32_t idx0 = row_ids_data[idx01],
idx0x = row_splits_data[idx0],
idx1 = idx01 - idx0x,
src = idx0 % num_srcs,
src_idx0 = idx0 / num_srcs,
src_idx0x = row_splits_ptrs_data[src][src_idx0],
src_idx01 = src_idx0x + idx1;
// We multiply the src_idx01 by num_srcs as a way of encoding it and the
// src into a single integer.
merge_map_data[idx01] =
uint32_t(src) + ((uint32_t)num_srcs * uint32_t(src_idx01));
});
}
return RaggedShape2(&row_splits, &row_ids, tot_elems);
}
RaggedShape MergeRaggedLayer(int32_t layer,
int32_t num_srcs,
RaggedShape **src,
const Array1<uint32_t> &merge_map,
Array1<uint32_t> *merge_map_out /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
K2_CHECK_GE(layer, 0);
K2_CHECK_LT(layer + 1, src[0]->NumAxes());
ContextPtr &c = src[0]->Context();
std::vector<int32_t*> row_splits_ptrs_vec(num_srcs);
int32_t tot_rows = 0, tot_elems = 0;
for (int32_t i = 0; i < num_srcs; i++) {
tot_rows += src[i]->TotSize(layer);
tot_elems += src[i]->TotSize(layer + 1);
row_splits_ptrs_vec[i] = src[i]->RowSplits(layer + 1).Data();
}
K2_CHECK_EQ(tot_rows, merge_map.Dim());
Array1<int32_t> row_splits_out(c, merge_map.Dim() + 1);
Array1<int32_t> row_ids_out(c, tot_elems);
const uint32_t *merge_map_data = merge_map.Data();
Array1<int32_t*> row_splits_ptrs(c, row_splits_ptrs_vec);
int32_t **row_splits_ptrs_data = row_splits_ptrs.Data();
int32_t *sizes_data = row_splits_out.Data();
K2_EVAL(c, tot_rows, lambda_set_sizes, (int32_t i) -> void {
uint32_t m = merge_map_data[i],
src = m % num_srcs,
pos = m / num_srcs;
int32_t size = row_splits_ptrs_data[src][pos + 1] -
row_splits_ptrs_data[src][pos];
sizes_data[i] = size;
});
ExclusiveSum(row_splits_out, &row_splits_out);
RowSplitsToRowIds(row_splits_out, &row_ids_out);
if (merge_map_out != nullptr) {
*merge_map_out = Array1<uint32_t>(c, tot_elems);
const int32_t *row_ids_data = row_ids_out.Data(),
*row_splits_data = row_splits_out.Data();
uint32_t *merge_map_out_data = merge_map_out->Data();
K2_EVAL(c, tot_elems, lambda_set_merge_map, (int32_t idx01) -> void {
int32_t idx0 = row_ids_data[idx01],
idx0x = row_splits_data[idx0],
idx1 = idx01 - idx0x,
m = merge_map_data[idx0],
src = m % num_srcs,
src_idx0 = m / num_srcs,
src_idx0x = row_splits_ptrs_data[src][src_idx0],
src_idx01 = src_idx0x + idx1;
// We multiply the src_idx01 by num_srcs as a way of encoding it and the
// src into a single integer.
merge_map_out_data[idx01] = uint32_t(src) +
((uint32_t)num_srcs * uint32_t(src_idx01));
});
}
return RaggedShape2(&row_splits_out, &row_ids_out, tot_elems);
}
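// Note on the merge_map encoding used above: an element taken from source
// `src` at linear position `src_idx01` within that source is stored as
// `src + num_srcs * src_idx01`, so it can be decoded with
// `src = m % num_srcs` and `src_idx01 = m / num_srcs`.  For example, with
// num_srcs == 3, position 7 of source 2 is encoded as 2 + 3*7 == 23.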
RaggedShape SubsampleRaggedLayer(RaggedShape &src, int32_t layer,
int32_t subsample_factor) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(layer, 0);
K2_CHECK_LT(layer, src.NumAxes() - 1);
int32_t num_rows = src.TotSize(layer),
num_elems = src.TotSize(layer + 1);
K2_CHECK_EQ(src.TotSize(layer) % subsample_factor, 0);
ContextPtr &c = src.Context();
int32_t new_num_rows = num_rows / subsample_factor;
Array1<int32_t> new_row_splits(c, new_num_rows + 1),
new_row_ids(c, num_elems);
const int32_t *row_splits_data = src.RowSplits(layer + 1).Data(),
*row_ids_data = src.RowIds(layer + 1).Data();
int32_t *new_row_splits_data = new_row_splits.Data(),
*new_row_ids_data = new_row_ids.Data();
if (c->GetDeviceType() == kCpu) {
for (int32_t i = 0; i <= new_num_rows; i++)
new_row_splits_data[i] = row_splits_data[i * subsample_factor];
for (int32_t i = 0; i < num_elems; i++)
new_row_ids_data[i] = row_ids_data[i] / subsample_factor;
} else {
int32_t block_size = 32;
auto lambda_round_up = [=] (int32_t n) -> int32_t {
return block_size * ((n + block_size - 1) / block_size);
};
// this rounding is to avoid one warp having to do 2 jobs, which would slow
// down the code due to warp divergence.
int32_t num_elems_plus = lambda_round_up(num_elems);
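    // For example, with num_elems == 1000: num_elems_plus == 1024, so threads
    // 0..999 set row_ids, threads 1000..1023 do nothing, and threads
    // 1024..1024+new_num_rows set row_splits; no warp mixes the two jobs.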
auto lambda_set_row_splits_and_ids = [=] __device__(int32_t i) -> void {
if (i >= num_elems_plus) {
int32_t r = i - num_elems_plus;
new_row_splits_data[r] = row_splits_data[r * subsample_factor];
} else if (i < num_elems) {
new_row_ids_data[i] = row_ids_data[i] / subsample_factor;
}
};
EvalDevice(c, num_elems_plus + new_num_rows + 1,
lambda_set_row_splits_and_ids);
}
return RaggedShape2(&new_row_splits, &new_row_ids, num_elems);
}
} // namespace k2
#include "common.cuh"
#include <kat/on_device/collaboration/warp.cuh>
#include <kat/on_device/collaboration/block.cuh>
#include <kat/on_device/sequence_ops/warp.cuh>
#include <kat/on_device/shuffle.cuh>
///@cond
#include <kat/detail/execution_space_specifiers.hpp>
///@endcond
namespace kat {
namespace linear_grid {
namespace collaborative {
using kat::collaborative::inclusivity_t;
namespace block {
/*
* TODO: Implement
* KAT_FD unsigned all_satisfy(unsigned int predicate, unsigned* scratch_area);
* KAT_FD unsigned none_satisfy(unsigned int predicate, unsigned* scratch_area);
* KAT_FD unsigned some_satisfy(unsigned int predicate, unsigned* scratch_area);
*
* at the block level
*
*/
// TODO: Currently kind-of assuming linear grids in several places in this file
// TODO: Check whether writing this with a forward iterator and std::advance
// yields the same PTX code (in which case we'll prefer that)
template <typename RandomAccessIterator, typename Size, typename T>
KAT_FD void fill_n(RandomAccessIterator start, Size count, const T& value)
{
auto f = [=](promoted_size_t<Size> pos) {
start[pos] = value;
};
at_block_stride(count, f);
}
template <typename RandomAccessIterator, typename T, typename Size = decltype(std::declval<RandomAccessIterator>() - std::declval<RandomAccessIterator>())>
KAT_FD void fill(RandomAccessIterator start, RandomAccessIterator end, const T& value)
{
Size count = end - start;
return fill_n(start, count, value);
}
template <typename RandomAccessIterator, typename Size>
KAT_FD void memzero_n(RandomAccessIterator start, Size count)
{
using value_type = typename std::iterator_traits<RandomAccessIterator>::value_type;
return fill_n(start, count, value_type{0});
}
template <typename RandomAccessIterator, typename Size = decltype(std::declval<RandomAccessIterator>() - std::declval<RandomAccessIterator>())>
KAT_FD void memzero(RandomAccessIterator start, RandomAccessIterator end)
{
auto count = end - start;
return memzero_n(start, count);
}
/**
* @brief apply a transformation to each element of an array, placing the results
* in another array.
*
* @param source The (block-common) origin of the data
* @param target The (block-common) destination into which to write the converted elements
 * @param length The (block-common) number of elements available (for reading) at the
 * source
*/
template <typename T, typename S, typename UnaryOperation, typename Size>
KAT_FD void transform_n(
const S* __restrict__ source,
Size length,
T* __restrict__ target,
UnaryOperation unary_op)
{
auto f = [&](promoted_size_t<Size> pos) {
target[pos] = unary_op(source[pos]);
};
at_block_stride(length, f);
}
/**
 * @note Prefer `transform_n()`; this overload forces the size type to `ptrdiff_t`, which is unnecessarily large.
*/
template <typename S, typename T, typename UnaryOperation, typename Size = std::ptrdiff_t>
KAT_FD void transform(
const S* __restrict__ source_start,
const S* __restrict__ source_end,
T* __restrict__ target,
UnaryOperation unary_op)
{
Size length = source_end - source_start;
return transform_n(source_start, length, target, unary_op);
}
/**
 * Have all block threads collaborate in copying
 * data between two memory locations (possibly not in the same memory
 * space), while also converting types.
 *
 * @param target The (block-common) destination into which to write the converted elements
 * @param source The (block-common) origin of the data
 * @param length The (block-common) number of elements available (for reading) at the
 * source
*/
template <typename S, typename T, typename Size>
KAT_FD void cast_and_copy_n(
const S* __restrict__ source,
Size length,
T* __restrict__ target)
{
auto op = [](S x) -> T { return T(x);} ;
return transform_n(source, length, target, op);
}
template <typename S, typename T, typename Size = std::ptrdiff_t>
KAT_FD void cast_and_copy(
const S* __restrict__ source_start,
const S* __restrict__ source_end,
T* __restrict__ target)
{
Size length = source_end - source_start;
return cast_and_copy_n(source_start, length, target);
}
/**
* @brief block-collaboratively copy data between stretches of memory
*
* @param source (block-common) location from which to copy data
* @param target (block-common) location into which to copy the first element
* @param length number of elements at @p source to copy
*/
template <typename T, typename Size>
KAT_FD void copy_n(
const T* __restrict__ source,
Size length,
T* __restrict__ target)
{
auto f = [=](promoted_size_t<Size> pos) {
target[pos] = source[pos];
};
at_block_stride(length, f);
}
/**
* @brief block-collaboratively copy data between stretches of memory
*
* @param source_start (block-common) location of the first data element to copy
* @param source_end (block-common) location past the last data element to copy
* @param target (block-common) location into which to copy the first element
*
 * @note Prefer `copy_n()`; this overload forces the size type to `ptrdiff_t`, which is unnecessarily large.
*/
template <typename T, typename Size = std::ptrdiff_t>
KAT_FD void copy(
const T* __restrict__ source_start,
const T* __restrict__ source_end,
T* __restrict__ target)
{
Size length = source_end - source_start;
return copy_n(source_start, length, target);
}
/**
* Use a lookup table to convert numeric indices to a sequence
* of values of any type
*/
template <typename T, typename I, typename Size, typename U = T>
KAT_FD void lookup(
T* __restrict__ target,
const U* __restrict__ lookup_table,
const I* __restrict__ indices,
Size num_indices)
{
auto f = [=](promoted_size_t<Size> pos) {
target[pos] = lookup_table[indices[pos]];
};
at_block_stride(num_indices, f);
}
// TODO: Consider replacing the following with a functor on GSL-style array spans
namespace detail {
template<class Op> struct accumulator_op_return_type_helper : accumulator_op_return_type_helper<decltype(&Op::operator())> {};
template<class Op> struct accumulator_op_return_type_helper<Op(Op&)> { using type = Op; };
template<class Op> struct accumulator_op_return_type_helper<Op(Op&) const> { using type = Op; };
template<class Op> struct accumulator_op_return_type_helper<Op(*)(Op&)> { using type = Op; };
template<class C, class M> struct accumulator_op_return_type_helper<M (C::*)> : accumulator_op_return_type_helper<M> {};
template <typename Op>
using accumulator_op_return_type_t = typename accumulator_op_return_type_helper<Op>::type;
}
/**
 * @brief Perform a reduction over a block's worth of data with a specific
 * (asymmetric) accumulation operation, while maintaining the input element type.
*
* @param value each thread's contribution to the reduction
* @param op the accumulation operator - it must have the appropriate `operator()`,
* i.e. with signature `T AccumulationOp::operator()(T&, T)`. It does not have
* to have any other members or types defined (so a lambda works fine).
*
 * @return for threads in the first warp of the block - the reduction result over
 *         all @p value elements of all block threads; for other threads - the
 *         result is undefined when @tparam AllThreadsObtainResult is false, and
 *         the same as for the first warp when it is true
*
* @note This _should_ work without full block participation, but it
* does need full warp participation, i.e. each warp either participates fully
* or not at all.
*
* @note One might wonder: "Why insist on the same type for the result and the
* input?" - well, that is not necessary. However, separating the types would
* require additional template or parameter information: Two operators (if not
* more), and a decision at what point we switch to the result type - immediately,
* after at most k operations, above the warp level. This also makes it
* nearly impossible to write "simple" calls to reduce - with a value and
* a single lambda. We may at some point define a structure for setting these
* parameters, which will put some onus on the user code, but allow for
* this flexibility. Poke the library author/contributors about this.
*
* @tparam AllThreadsObtainResult when true, all threads in a block will
* return the reduction result; otherwise, only the first warp of the block
* is guaranteed to return the actual reduction result.
*
*/
template<
typename T,
typename AccumulationOp,
bool AllThreadsObtainResult = false,
T NeutralValue = T{}>
KAT_DEV T reduce(T value, AccumulationOp op)
{
namespace gi = kat::linear_grid::grid_info;
static __shared__ T warp_reductions[warp_size];
auto intra_warp_result = kat::collaborative::warp::reduce<T, AccumulationOp>(value, op);
collaborative::block::share_per_warp_data(intra_warp_result, warp_reductions, gi::warp::first_lane);
// Note: assuming here that there are at most 32 warps per block;
// if/when this changes, more warps may need to be involved in this second
// phase
if (not AllThreadsObtainResult) {
// We currently only guarantee the first thread has the final result,
// which is what allows most threads to return already:
if (not gi::warp::is_first_in_block()) { return NeutralValue; }
}
collaborative::block::barrier(); // Perhaps we can do with something weaker here?
// shared memory now holds all intra-warp reduction results
// read from shared memory only if that warp actually existed
auto other_warp_result = (gi::lane::id() < gi::block::num_warps()) ?
warp_reductions[gi::lane::id()] : NeutralValue;
return kat::collaborative::warp::reduce<T, AccumulationOp>(other_warp_result, op);
// TODO: Would it perhaps be faster to have only one warp compute this,
// and then use get_from_first_thread() ?
}
template<
typename T,
bool AllThreadsObtainResult = false>
KAT_DEV T sum(T value)
{
auto plus = [](T& x, T y) { x += y; };
return reduce<T, decltype(plus), AllThreadsObtainResult, T{}>(value, plus);
}
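/*
 * Usage sketch (illustrative only, not part of the library): a hypothetical
 * kernel summing one value per thread over the whole block using the
 * primitives above.  Assumes a linear (1-D) block whose size is a multiple of
 * warp_size; `data` and `block_sums` are made-up names.
 *
 *     __global__ void block_sum_example(const int* data, int* block_sums)
 *     {
 *         int my_value = data[blockIdx.x * blockDim.x + threadIdx.x];
 *         // With the default AllThreadsObtainResult == false, only the first
 *         // warp is guaranteed to hold the true block-wide sum.
 *         int total = kat::linear_grid::collaborative::block::sum(my_value);
 *         if (threadIdx.x == 0) { block_sums[blockIdx.x] = total; }
 *     }
 */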
/**
*
* @note Supports only full-warps, and you should probably have
* the entire block participate.
*
* @param scratch
* @param value
* @return
*/
template <
typename T,
typename AccumulationOp,
bool Inclusivity = inclusivity_t::Inclusive,
T NeutralValue = T{}
>
KAT_DEV T scan(T value, AccumulationOp op, T* __restrict__ scratch)
{
auto intra_warp_inclusive_scan_result = kat::collaborative::warp::scan<
T, AccumulationOp, inclusivity_t::Inclusive, NeutralValue >(value, op);
auto last_active_lane_id =
// (AssumeFullWarps or not grid_info::warp::is_last_in_block()) ?
warp::last_lane
// : collaborative::warp::last_active_lane_index()
;
	// Note: At the moment, we assume the block is made up of full warps;
	// otherwise, the last lane of a partially-full warp would not be active,
	// so no lane of that warp would write its result to shared memory.
	// The assumption actually kicks in even earlier, since our warp scan also
	// assumes participation of the full warp.
collaborative::block::share_per_warp_data(
intra_warp_inclusive_scan_result, scratch, last_active_lane_id);
// The last active lane writes, because only it has the whole warp's reduction value
collaborative::block::barrier();
// scratch buffer now holds all full-warp _reductions_;
if (warp::is_first_in_block()) {
// Note that for a block with less than warp_size warps, some of the lanes
// here will read junk data from the scratch area; but that's not a problem,
		// since these values will not affect the scan results of previous lanes,
// and hence not affect any of the existing warps later on when they rely
// on what the first warp computes here.
auto warp_reductions_scan_result =
kat::collaborative::warp::scan<T, AccumulationOp, inclusivity_t::Exclusive, NeutralValue>(
scratch[lane::id()], op);
scratch[lane::id()] = warp_reductions_scan_result;
}
collaborative::block::barrier();
auto r = scratch[warp::id()];
T intra_warp_scan_result;
if (Inclusivity == inclusivity_t::Inclusive) {
intra_warp_scan_result = intra_warp_inclusive_scan_result;
}
else {
auto shuffled = shuffle_up(intra_warp_inclusive_scan_result, 1);
intra_warp_scan_result = lane::is_first() ? NeutralValue : shuffled;
}
op(r, intra_warp_scan_result);
return r;
}
template <
typename T,
typename AccumulationOp,
bool Inclusivity = inclusivity_t::Inclusive,
T NeutralValue = T{}
>
KAT_DEV T scan(T value, AccumulationOp op)
{
	// Note: we assume there can be no more than warp_size warps per block
static __shared__ T scratch[warp_size];
return scan<T, AccumulationOp, Inclusivity, NeutralValue>(value, op, scratch);
}
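/*
 * Usage sketch (illustrative only, not part of the library): an inclusive
 * block-wide prefix sum, e.g. for computing per-thread output offsets.
 * `num_outputs` is a made-up per-thread value.
 *
 *     auto plus = [](unsigned& acc, unsigned x) { acc += x; };
 *     // Each thread obtains the sum of num_outputs over threads 0..threadIdx.x
 *     // (inclusive scan, the default); pass inclusivity_t::Exclusive as the
 *     // Inclusivity template argument for an exclusive scan.
 *     unsigned my_offset_end =
 *         kat::linear_grid::collaborative::block::scan(num_outputs, plus);
 */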
/**
* Perform both a block-level scan and a block-level reduction,
* with each thread having the results of both.
*
* @note implementation relies on the details of the implementation
* of the scan primitive, above.
*
* @todo consider returning a pair rather than using non-const references
* @todo lots of code duplication with just-scan
* @todo add a bool template param allowing the code to assume the block is
* full (this saves a few ops)
*
*
 * @param scratch An area of memory which this primitive can use for
 * inter-warp communication (as warps cannot communicate directly). It
 * must have at least warp_size elements allocated
 * (i.e. sizeof(T) * warp_size bytes).
* @param value Each thread provides its input value, and the scan is applied
* to them all as though they were in some input array
* @param scan_result the result of applying a scan to all threads' input values,
* in order of the thread indices
* @param reduction_result the result of reducing all threads' input values
*/
template <
typename T,
typename AccumulationOp,
bool Inclusivity = inclusivity_t::Inclusive,
T NeutralValue = T{}
>
KAT_DEV void scan_and_reduce(
T* __restrict__ scratch, // must have as many elements as there are warps
T value,
AccumulationOp op,
T& scan_result,
T& reduction_result)
{
auto intra_warp_inclusive_scan_result = kat::collaborative::warp::scan<
T, AccumulationOp, inclusivity_t::Inclusive, NeutralValue>(value, op);
auto last_active_lane_id =
// (AssumeFullWarps or not grid_info::warp::is_last_in_block()) ?
warp::last_lane
// : collaborative::warp::last_active_lane_index()
;
	// Note: At the moment, we assume the block is made up of full warps;
	// otherwise, the last lane of a partially-full warp would not be active,
	// so no lane of that warp would write its result to shared memory.
	// The assumption actually kicks in even earlier, since our warp scan also
	// assumes participation of the full warp.
collaborative::block::share_per_warp_data(
intra_warp_inclusive_scan_result, scratch, last_active_lane_id);
// The last active lane writes, because only it has the whole warp's reduction value
// scratch[i] now contains the reduction result of the data of all threads in
// the i'th warp of this block
auto num_warps = block::num_warps();
auto partial_reduction_result = scratch[num_warps - 1];
// We're keeping this single-warp reduction result, since it will soon
// be overwritten
if (warp::is_first_in_block()) {
// Note that for a block with less than warp_size warps, some of the lanes
// here will read junk data from the scratch area; but that's not a problem,
		// since these values will not affect the scan results of previous lanes,
// and hence not affect any of the existing warps later on when they rely
// on what the first warp computes here.
auto other_warp_reduction_result = scratch[lane::id()];
auto warp_reductions_scan_result = kat::collaborative::warp::scan<
T, AccumulationOp, inclusivity_t::Exclusive, NeutralValue>(
other_warp_reduction_result, op);
scratch[lane::id()] = warp_reductions_scan_result;
}
collaborative::block::barrier();
// scratch[i] now contains the reduction result of the data of all threads in
// warps 0 ... i-1 of this block
op(partial_reduction_result, scratch[num_warps - 1]);
// We had kept the last warp's reduction result, now we've taken
// the other warps into account as well
auto partial_scan_result = scratch[warp::id()]; // only a partial result for now
// To finalize the computation, we now account for the requested scan inclusivity
T intra_warp_scan_result;
if (Inclusivity == inclusivity_t::Inclusive) {
intra_warp_scan_result = intra_warp_inclusive_scan_result;
}
else {
// Note: We don't have a de-accumulation operator.
// TODO: Version of this function taking a de-accumulation operator
		// which avoids this shuffle
T shuffled = shuffle_up(intra_warp_inclusive_scan_result, 1);
intra_warp_scan_result = lane::is_first() ? NeutralValue : shuffled;
}
op(partial_scan_result, intra_warp_scan_result);
reduction_result = partial_reduction_result;
scan_result = partial_scan_result;
}
template <
typename T,
typename AccumulationOp,
bool Inclusivity = inclusivity_t::Inclusive,
T NeutralValue = T{}
>
KAT_DEV void scan_and_reduce(
T value,
AccumulationOp op,
T& scan_result,
T& reduction_result)
{
	// Note: we assume there can be no more than warp_size warps per block
static __shared__ T scratch[warp_size];
scan_and_reduce<T, AccumulationOp, Inclusivity, NeutralValue>(
scratch, value, op, scan_result, reduction_result);
}
/**
* Perform an accumulation operation (e.g. addition) between equal-sized arrays -
* with either regular or atomic semantics. Usable with memory locations which
* the entire block has the same view of and accessibility to (mostly shared
* and global, but not just those).
*
* @note
* 1. Assumes a linear block.
* 2. The operation is supposed to have the signature:
* WhateverWeDontCare operation(D& accumulator_element, S value)
* otherwise it might be a no-op here.
* 3. If you're having multiple blocks calling this function with the same
* destination, it will have to be atomic (as you cannot guarantee these blocks will
* not execute simultaneously, either on different multiprocessors or on the same
* multiprocessor). Also, if you want to use a global-mem source, you will
* need to pass this function block-specific offsets; remember it is not
* a kernel!
*
* @tparam D Destination data type
* @tparam S Source data type
* @tparam AccumulatingOperation Typically, one of the 'accumulator' substructures of
* the functors in liftedfunctions.hpp ; but it may very well be an accumulator::atomic
* substructure
* @tparam Size ... so that you don't have to decide whether you want to specify your
* number of elements as an int, uint, long long int, ulong long etc.
* @param[inout] destination The array into which we accumulate; holds existing data
* and is not simply overwritten.
* @param[in] source The array of partial data to integrate via accumulation.
* @param[in] length the length in elements of @p destination and @p source
*
 * @todo consider taking a GSL-span-like parameter instead of a ptr+length
*
* @todo Some inclusions in the block-primitives might only be relevant to the
* functions here; double-check.
*
* @todo consider using elementwise_apply for this.
*
*/
template <typename D, typename RandomAccessIterator, typename AccumulatingOperation, typename Size>
KAT_FD void elementwise_accumulate_n(
AccumulatingOperation op,
D* __restrict__ destination,
RandomAccessIterator __restrict__ source,
Size length)
{
auto accumulate_in_element = [&](promoted_size_t<Size> pos) {
op(destination[pos], source[pos]);
};
at_block_stride(length, accumulate_in_element);
}
template <typename D, typename RandomAccessIterator, typename AccumulatingOperation, typename Size = std::ptrdiff_t>
KAT_FD void elementwise_accumulate(
AccumulatingOperation op,
D* __restrict__ destination,
RandomAccessIterator __restrict__ source_start,
RandomAccessIterator __restrict__ source_end)
{
elementwise_accumulate_n(op, destination, source_start, source_end - source_start);
}
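/*
 * Usage sketch (illustrative only, not part of the library): block-level
 * accumulation of a buffer of partial results into an accumulator array,
 * called from within a kernel.  `block_accumulator`, `partials` and `n` are
 * made-up names.
 *
 *     auto add_into = [](float& acc, float x) { acc += x; };
 *     kat::linear_grid::collaborative::block::elementwise_accumulate_n(
 *         add_into, block_accumulator, partials, n);
 */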
template <typename Operation, typename Size, typename ResultDatum, typename... Args>
KAT_FD void elementwise_apply(
ResultDatum* __restrict__ results,
Size length,
Operation op,
const Args* __restrict__ ... arguments)
{
auto f = [&](promoted_size_t<Size> pos) {
return op(arguments[pos]...);
};
at_block_stride(length, f);
}
} // namespace block
} // namespace collaborative
} // namespace linear_grid
} // namespace kat
#endif // CUDA_KAT_BLOCK_COLLABORATIVE_SEQUENCE_OPS_CUH_
#include "libxomp.h"
//----------------------------------------------------
// Device xomp_cuda_property retrieving functions
#ifdef __cplusplus
extern "C" {
#endif
DDE** DDE_head;
DDE** DDE_tail;
int xomp_num_devices = -1;
int xomp_max_num_devices = -1; // -1 means un-initialized
void** xomp_cuda_prop;
#ifdef __cplusplus
}
#endif
bool xomp_verbose = false;
/* Set the device id to be used by the current task */
void xomp_set_default_device (int devID)
{
cudaError_t err;
assert (devID>=0 && devID< xomp_get_max_devices());
err = cudaSetDevice(devID);
if(err != cudaSuccess)
{
fprintf(stderr,"XOMP acc_init: %s %s %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(err);
}
}
/* Obtain the max number of devices supported by the hardware*/
int xomp_get_max_devices(void)
{
int rt;
cudaError_t err;
if (xomp_max_num_devices != -1)
return xomp_max_num_devices;
err = cudaGetDeviceCount(&rt);
if(err != cudaSuccess)
{
fprintf(stderr,"XOMP acc_init: %s %s %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(err);
}
xomp_max_num_devices = rt;
assert (rt != -1);
return xomp_max_num_devices;
}
/* The default number of devices to be used */
int xomp_get_num_devices (void)
{
char * env_var_str;
int env_var_val;
//If already initialized, return the value directly
if (xomp_num_devices!= -1)
{
// printf ("DEBUG: xomp_get_num_devices() returns the existing value %d\n", xomp_num_devices);
return xomp_num_devices;
}
// otherwise, obtain it based on max device count and env variable
if (xomp_max_num_devices == -1)
xomp_max_num_devices = xomp_get_max_devices();
env_var_str = getenv("OMP_NUM_DEVICES");
if (env_var_str != NULL)
{
sscanf(env_var_str, "%d", &env_var_val);
if (env_var_val <= 0)
{
printf ("OMP_NUM_DEVICES should > 0\n");
exit(1);
}
// cap the value
if (env_var_val > xomp_max_num_devices)
{
printf ("OMP_NUM_DEVICES %d is too big, set to max number of devices %d instead\n", env_var_val, xomp_max_num_devices );
env_var_val = xomp_max_num_devices ;
}
xomp_num_devices = env_var_val;
}
else
xomp_num_devices = xomp_max_num_devices;
// printf ("DEBUG: xomp_get_num_devices() returns a fresh value %d\n", xomp_num_devices);
return xomp_num_devices;
}
void omp_set_num_devices (int count)
{
assert (count>0);
xomp_num_devices = count;
}
void xomp_acc_init(void)
{
#if 0
cudaError_t err;
int maxDevice = 0;
err = cudaGetDeviceCount(&maxDevice);
if(err != cudaSuccess)
{
fprintf(stderr,"XOMP acc_init: %s %s %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(err);
}
#endif
// to be safe, we preallocate memory based on max device count
xomp_max_num_devices = xomp_get_max_devices();
DDE_head = (DDE**)calloc(1,sizeof(DDE*)*xomp_max_num_devices);
DDE_tail = (DDE**)calloc(1,sizeof(DDE*)*xomp_max_num_devices);
xomp_cuda_prop = (void**)calloc(1,sizeof(void*)*xomp_max_num_devices);
}
// this can be called multiple times. But the xomp_cuda_prop variable will only be set once
cudaDeviceProp * xomp_getCudaDeviceProp(int devID)
{
cudaDeviceProp* propPointer = NULL;
if (xomp_cuda_prop[devID] == NULL )
{
propPointer = (cudaDeviceProp *) malloc(sizeof(cudaDeviceProp));
xomp_cuda_prop[devID] = propPointer;
assert (xomp_cuda_prop[devID] != NULL);
int count;
cudaGetDeviceCount (&count);
assert (count>=1); // must have at least one GPU here
cudaGetDeviceProperties (propPointer, devID);
}
else
propPointer = (cudaDeviceProp *)xomp_cuda_prop[devID];
return propPointer;
}
void xomp_print_gpu_info(int devID)
{
  int max_threads_per_block = xomp_getCudaDeviceProp(devID)->maxThreadsPerBlock;
  int max_blocks_per_grid_x = xomp_getCudaDeviceProp(devID)->maxGridSize[0];
  // use size_t: totalGlobalMem easily overflows an int on modern GPUs
  size_t global_memory_size = xomp_getCudaDeviceProp(devID)->totalGlobalMem;
  size_t shared_memory_size = xomp_getCudaDeviceProp(devID)->sharedMemPerBlock;
  int registers_per_block = xomp_getCudaDeviceProp(devID)->regsPerBlock;
  printf ("Found a GPU with \n\tmax threads per block=%d, \n\tmax blocks for Grid X dimension=%d\n\
\tglobal mem bytes =%zu, \n\tshared mem bytes =%zu, \n\tregs per block = %d\n",
      max_threads_per_block, max_blocks_per_grid_x, global_memory_size, shared_memory_size,
      registers_per_block);
}
// A helper function to probe physical limits based on GPU Compute Capability numbers
// Reference: http://developer.download.nvidia.com/compute/cuda/CUDA_Occupancy_calculator.xls
size_t xomp_get_maxThreadBlocksPerMultiprocessor(int devID)
{
int major, minor;
assert (devID>=0 && devID<xomp_max_num_devices);
major = xomp_getCudaDeviceProp(devID)-> major;
minor = xomp_getCudaDeviceProp(devID)-> minor;
if (major <= 2) //1.x and 2.x: 8 blocks per multiprocessor
return 8;
else if (major == 3)
return 16;
else if (major == 5)
return 32;
else
{
printf("Error: xomp_get_maxThreadBlocksPerMultiprocessor(): unhandled Compute Capability numbers%d.%d \n", major, minor);
assert (false);
}
assert (false);
return 0;
}
// max thread per block, useful for 1-D problem
// The goal is to maximize GPU occupancy for each multiprocessor : physical max warps
// Reference: http://developer.download.nvidia.com/compute/cuda/CUDA_Occupancy_calculator.xls
//
// Two physical limits are considered for now
// 1) max-active-threads per multiprocessor
// 2) max active thread blocks per multiprocessor
// So for 1-D block, max threads per block = maxThreadsPerMultiProcessor / maxBlocks per multiprocessor
size_t xomp_get_maxThreadsPerBlock(int devID)
{
// this often causes oversubscription to the cores supported by GPU SM processors
//return xomp_getCudaDeviceProp()->maxThreadsPerBlock;
//return 128;
// 2.0: 1536/8= 192 threads per block
// 3.5 2048/16 = 128
return xomp_getCudaDeviceProp(devID)->maxThreadsPerMultiProcessor / xomp_get_maxThreadBlocksPerMultiprocessor(devID);
}
/*
 * In order to ensure best performance, we set up a max_block limitation here, so that each core in the GPU works on only one thread.
 * Use the XOMP_accelerator_loop_default() runtime to support input data sizes that exceed max_block*xomp_get_maxThreadsPerBlock().
 */
size_t xomp_get_max1DBlock(int devID, size_t s)
{
#if 1
size_t block_num = s/xomp_get_maxThreadsPerBlock(devID);
if (s % xomp_get_maxThreadsPerBlock(devID)!= 0)
block_num ++;
//return block_num;
size_t max_block = xomp_getCudaDeviceProp(devID)->multiProcessorCount* xomp_get_maxThreadBlocksPerMultiprocessor(devID);
return block_num<max_block? block_num: max_block;
/* max threads per multiprocessor / threads-per-block * num_multiprocessor */
//return xomp_getCudaDeviceProp()->multiProcessorCount*(xomp_getCudaDeviceProp()->maxThreadsPerMultiProcessor /xomp_get_maxThreadsPerBlock()) ;
//return xomp_getCudaDeviceProp()->maxThreadsPerMultiProcessor /xomp_get_maxThreadsPerBlock() ;
#else
return xomp_getCudaDeviceProp()->multiProcessorCount* xomp_get_maxThreadBlocksPerMultiprocessor();
#endif
}
// Get the max number threads for one dimension (x or y) of a 2D block
// Two factors are considered: the total number of threads within the 2D block must<= total threads per block
// x * y <= maxThreadsPerBlock 512 or 1024
// each dimension: the number of threads must <= maximum x/y-dimension
// x <= maxThreadsDim[0], 1024
// y <= maxThreadsDim[1], 1024
// maxThreadsDim[0] happens to be equal to maxThreadsDim[1] so we use a single function to calculate max segments for both dimensions
size_t xomp_get_max_threads_per_dimesion_2D (int devID)
{
int max_threads_per_block = xomp_getCudaDeviceProp(devID)->maxThreadsPerBlock;
// we equalize the number of threads in each dimension
int max_threads_per_2d_dimension = (int)(sqrt((float)max_threads_per_block));
assert (max_threads_per_2d_dimension*max_threads_per_2d_dimension<= max_threads_per_block);
// our assumption is that dim[0] == dim[1] so we handle x and y in one function
assert ( xomp_getCudaDeviceProp(devID)->maxThreadsDim[0] == xomp_getCudaDeviceProp(devID)->maxThreadsDim[1]);
assert (max_threads_per_2d_dimension <= xomp_getCudaDeviceProp(devID)->maxThreadsDim[0]);
return max_threads_per_2d_dimension;
}
// return the max number of segments for a dimension (either x or y) of a 2D block
// we define the number of segments to be SIZE_of_Dimension_x/max_threads_x_dimension
size_t xomp_get_maxSegmentsPerDimensionOf2DBlock(int devID, size_t dimension_size)
{
// For simplicity, we don't yet consider the factor of warp size for now
// TODO: block size should be divisible by the warp size??
  // e.g. if max threads per block is 1024, then the max number of threads per dimension in a 2D block is sqrt(1024) = 32
size_t max_threads_per_2d_dimension = xomp_get_max_threads_per_dimesion_2D (devID);
size_t block_num_x_or_y = dimension_size/max_threads_per_2d_dimension;
if (dimension_size % max_threads_per_2d_dimension != 0)
block_num_x_or_y ++;
return block_num_x_or_y;
}
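// Worked example: with maxThreadsPerBlock == 1024, each 2D block is 32x32
// threads; a dimension of size 100 then needs ceil(100/32) == 4 segments.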
/*-----------------------------------------------------
Device memory allocation functions
*/
void* xomp_deviceMalloc(size_t size)
{
void * devPtr;
cudaError_t rt = cudaMalloc(&devPtr, size);
if ( (size !=0) && (rt == cudaSuccess) )
{
return devPtr;
}
else
{
fprintf(stderr, "Error: cudaMalloc() failed to allocate the requested %d bytes!\n",size );
assert (false);
return NULL; // it is a bad idea to silently return a NULL pointer
}
}
// A host version
void* xomp_hostMalloc(size_t size)
{
assert (size>0);
void* hostPtr;
hostPtr = (char*) malloc (size);
if (hostPtr == NULL)
{
fprintf(stderr, "Error: malloc() failed to allocate the requested %d bytes!\n",size );
assert (hostPtr != NULL);
}
return hostPtr;
}
// memory copy from src to dest, return the pointer to dest. NULL pointer if anything is wrong
void * xomp_memcpyHostToDevice (void *dest, const void * src, size_t n)
{
assert (dest != NULL);
assert (src != NULL);
if (xomp_verbose)
printf("xomp_memcpyHostToDevice(): dest=%p src =%p size=%d\n",dest, src, n);
cudaError_t rt = cudaMemcpy (dest, src, n, cudaMemcpyHostToDevice);
if (rt == cudaSuccess)
return dest;
else
{
fprintf(stderr, "Error: cudaMemcpy() failed to copy memory from Host %p to Device %p, for %d bytes!\n",src, dest, n);
assert (false);
return NULL; // it is a bad idea to silently return a NULL pointer
}
}
void * xomp_memcpyDeviceToHost (void *dest, const void * src, size_t n)
{
assert (dest != NULL);
assert (src != NULL);
if (xomp_verbose)
printf("xomp_memcpyDeviceToHost(): dest=%p src =%p size=%d\n",dest, src, n);
cudaError_t rt = cudaMemcpy (dest, src, n, cudaMemcpyDeviceToHost);
if (rt == cudaSuccess)
return dest;
else
{
fprintf(stderr, "Error: cudaMemcpy() failed to copy memory from Device %p to Host %p, for %d bytes!\n",src, dest, n);
fprintf(stderr, "Error message is =%s\n",cudaGetErrorString(rt));
assert (false);
return NULL; // it is a bad idea to silently return a NULL pointer
}
}
// copy a dynamically allocated host source array to a linear dest address on a GPU device.
// The dimension information of the source array is given by: int dimensions[dimension_size], with known element size.
// bytes_copied reports the total bytes copied by this function.
// Liao 4/25/2012
void * xomp_memcpyDynamicHostToDevice (void *dest, const void * src, int * dimensions, size_t dimension_size, size_t element_size, size_t *bytes_copied)
{
assert (dest != NULL);
assert (src != NULL);
if (dimension_size == 1) // down to the final, inner-most dimension
{
// this is a firm, bottom count of bytes copied
*bytes_copied = element_size* dimensions[0]; // number of elements * element_size
xomp_memcpyHostToDevice (dest, src, *bytes_copied);
}
else
{
assert (dimension_size>=2);
// 2-D or more: arrays of sub-arrays, copy each sub-array separately
void ** array2 = (void**) src ; // re-interpret src to be array of arrays
size_t sub_array_count = dimensions[0]; // top dimension, how many sub-arrays
// prepare dimension information for sub-arrays
dimensions ++;// go to the next dimension
dimension_size --; // sub-array has one less dimension
char* new_dest = (char*) dest; // byte addressable for dest
size_t total_subarray_bytes_copied =0;
int i;
for (i = 0; i< sub_array_count; i++) // for each sub-arrays
{
size_t subarray_bytes_copied = 0;
void* sub_array_src = (void*) (array2[i]); // get start address for each sub-array
assert (sub_array_src != NULL);
// recursively call to copy each sub-array
xomp_memcpyDynamicHostToDevice (new_dest, (void*)sub_array_src, dimensions, dimension_size,
element_size, &subarray_bytes_copied);
total_subarray_bytes_copied += subarray_bytes_copied; // recursively accumulate the bytes copied, instead of calculating directly.
new_dest += subarray_bytes_copied ; // update the dest offset here
}
*bytes_copied = total_subarray_bytes_copied;
}
return dest;
}
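/* Usage sketch (illustrative only): copy a dynamically allocated 3x4 float
 * array (as created by xomp_mallocArray() further below) into one linear
 * device buffer.
 *
 *   int dims[2] = {3, 4};
 *   float** h_a = (float**) xomp_mallocArray(dims, 2, sizeof(float));
 *   // ... fill h_a[i][j] ...
 *   void* d_a = xomp_deviceMalloc(3 * 4 * sizeof(float));
 *   size_t copied = 0;
 *   xomp_memcpyDynamicHostToDevice(d_a, h_a, dims, 2, sizeof(float), &copied);
 *   // copied == 3 * 4 * sizeof(float)
 */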
// copy linear src memory to dynamically allocated destination, with dimension information given by
// int dimensions[dimension_size]
// the source memory has total n continuous memory, with known size for each element
// the total bytes copied by this function is reported by bytes_copied
// test code: mallocArray-xomp.cu
void * xomp_memcpyDynamicDeviceToHost (void *dest, int * dimensions, size_t dimension_size, const void * src, size_t element_size, size_t *bytes_copied)
{
// int dimensions[1] = {10}; dimension_size =1;
// a[10]:
if (dimension_size == 1) // down to the final dimension
{
// this is a firm, bottom count of bytes copied
*bytes_copied = element_size* dimensions[0]; // number of elements * element_size
xomp_memcpyDeviceToHost (dest, src, *bytes_copied);
}
else
{
int i;
assert (dimension_size>=2);
// 2-D or more: arrays of sub-arrays, copy each sub-array separately
void ** array2 = (void**) dest; // re-interpret dest to be array of arrays
size_t sub_array_count = dimensions[0]; // current dimension, how many sub-arrays
// prepare dimension information for sub-arrays
dimensions ++;// go to the next dimension
dimension_size --;
char* new_src = (char*) src; // byte addressable for src
size_t total_subarray_bytes_copied =0;
for (i = 0; i< sub_array_count; i++) // for each sub-arrays
{
size_t subarray_bytes_copied =0;
void* sub_array_dest = (void*) (array2[i]); // get start address for each sub-array
xomp_memcpyDynamicDeviceToHost ((void*)sub_array_dest, dimensions, dimension_size,
new_src, element_size, &subarray_bytes_copied);
total_subarray_bytes_copied += subarray_bytes_copied; // recursively accumulate the bytes copied, instead of calculating directly.
new_src += subarray_bytes_copied ; // update the source offset here
}
*bytes_copied = total_subarray_bytes_copied;
}
return dest;
}
void * xomp_memcpyDeviceToDevice (void *dest, const void * src, size_t n)
{
cudaError_t rt = cudaMemcpy (dest, src, n, cudaMemcpyDeviceToDevice);
if (rt == cudaSuccess)
return dest;
else
{
fprintf(stderr, "Error: cudaMemcpy() failed to copy from Device %p to Device %p for requested %d bytes!\n", src, dest, n );
assert (false);
return NULL; // it is a bad idea to silently return a NULL pointer
}
}
void * xomp_memcpyHostToHost (void *dest, const void * src, size_t n) // same as memcpy??
{
cudaError_t rt = cudaMemcpy (dest, src, n, cudaMemcpyHostToHost);
if (rt == cudaSuccess)
return dest;
else
{
fprintf(stderr, "Error: cudaMemcpy() failed to copy from Host %p to Host %p for requested %d bytes!\n", src, dest, n );
assert (false);
return NULL; // it is a bad idea to silently return a NULL pointer
}
}
//------------------------------------------------------
// free the device memory pointed by a pointer, return false in case of failure, otherwise return true
bool xomp_freeDevice(void* devPtr)
{
cudaError_t rt = cudaFree (devPtr);
if (rt == cudaSuccess)
return true;
else
return false;
}
// free the host memory pointed by a pointer, return false in case of failure, otherwise return true
bool xomp_freeHost(void* hostPtr)
{
cudaError_t rt = cudaFreeHost(hostPtr);
if (rt == cudaSuccess)
return true;
else
return false;
}
//------------------------------------------------------
// data set size checking functions
#if 0
// make sure the length of the array can be mapped to the cuda threads
assert (SIZE <= max_blocks_per_grid_x* max_threads_per_block);
// make sure the data will fit into the device memory (shared memory)
printf("matrix-vector multiplication with size=%d\n", SIZE);
// one matrix and two vectors
int mem_required = SIZE*SIZE*sizeof(float) + SIZE* sizeof(float) *2;
if (global_memory_size > 0) //sometimes the number is too large and it overflows to be a negative integer
assert (mem_required <= global_memory_size);
#endif
//------------------------------------------------------
#if 0
double xomp_time_stamp()
{
struct timeval t;
double time;
gettimeofday(&t, NULL);
time = t.tv_sec + 1.0e-6*t.tv_usec;
return time;
}
#endif
//------------------------------------------------------
// Host side helper functions
//--- a helper function to allocate 2-D arrays
/* Allocate a multi-dimensional array
*
* Input parameters:
* int *dimensions: an integer array storing the size of each dimension
* size_t dimension_num: the number of dimensions
* size_t esize: the size of an array element
*
* return:
* the pointer to the allocated array
* */
void * xomp_mallocArray(int * dimensions, size_t dimension_num, size_t esize)
{
int i;
void * array = NULL;
// if (xomp_verbose)
// printf("xomp_xomp_mallocArray(): dimensions=%p dimension =%d element size=%d\n",dimensions, dimension_num, esize);
// Handle 1-D array: do element-wise malloc
if ( dimension_num == 1)
{
array = (void *) malloc(dimensions[dimension_num - 1]*esize);
if(array == NULL)
{
fprintf(stderr, "out of memory\n");
abort();
}
}
else // two and more dimensions to be allocated: reduce it to be a 2-step allocation
{
// 1st step: allocate the first dimension
// by treating it as allocating a 1-D array of arrays (pointer)
void ** array2 = NULL;
array2 = (void **) xomp_mallocArray(dimensions, 1 ,sizeof (void *));
size_t prev_dim_size = dimensions[0];// number of elements of the first dimension
// 2nd step: allocate the remaining N -1 dimension arrays, each is an element of the first array
// peel off the 1st(previous) dimension, focus on the rest dimensions
dimensions ++;
// each of element is an array has a smaller dimension number
dimension_num --;
for(i = 0; i < prev_dim_size ; i++)
{
array2[i] = xomp_mallocArray (dimensions, dimension_num, esize);
}
// return the pointer to the first dimension
array = (void *) array2;
}
return array;
}
/* Free a pointer to a multi-dimensional array
* int * dimensions: store the sizes of each dimension
* size_t dimension_num: the number of dimensions
*
* */
void xomp_freeArrayPointer (void* array, int * dimensions, size_t dimension_num)
{
int i;
// 1-D case, call free() directly
if (dimension_num == 1)
{
free (array);
}
else
{ // 2-D or more, iterate through higher dimension and try to free inner arrays
int prev_dim_size = dimensions [0];
// step into one dimension
dimensions ++;
dimension_num --;
for (i =0; i< prev_dim_size ; i++)
{
xomp_freeArrayPointer (((void **)array)[i], dimensions, dimension_num);
}
}
}
#if 0
/* reduction minus is handled the same way as reduction plus since we just replace the reduction variable with its local copy for each thread
The associated statement is intact except for the variable replacement : e.g. a-=5 becomes local_a -= 5;
in the end of each thread accumulates thread local negative values.
At the block level, we just simply add them all to be the block level negative values
*/
/* we have to encode the type into function name since C function signature does not include parameter list! */
#define XOMP_INNER_BLOCK_REDUCTION_DEF(dtype) \
__device__ void xomp_inner_block_reduction_##dtype(dtype local_value, dtype * grid_level_results, int reduction_op) \
{ \
/* __shared__ float* sdata[gridDim.x]; not compilable */ \
/* block size of data, size is specified by the kernel launch parameter (3rd one) */ \
/* shared data has to have different names for different types. Cannot reuse name across types. */ \
extern __shared__ dtype sdata_##dtype[]; \
sdata_##dtype[threadIdx.x] = local_value; \
__syncthreads(); \
/* blockDim.x is the block size */ \
int isEvenSize = (blockDim.x % 2 ==0); \
/* contiguous range pattern: half folding and add */ \
for(int offset = blockDim.x / 2; \
offset > 0; /* folding and add */ \
offset >>= 1) /* offset shrinks half each time */ \
{ \
if(threadIdx.x < offset) \
{ \
/* add a partial sum upstream to our own */ \
switch (reduction_op){ \
case XOMP_REDUCTION_PLUS: \
case XOMP_REDUCTION_MINUS: \
sdata_##dtype[threadIdx.x] += sdata_##dtype[threadIdx.x + offset]; \
break; \
/* TODO add support for more operations*/ \
default: \
{ \
/* TODO: add assertion or set cudaError with an error code */ \
/* cannot call a host function */ \
/* fprintf (stderr, "Error. xomp_inner_block_reduction() unhandled reduction operation:%d\n",reduction_op); */ \
/* assert (false); */ \
} \
} /* end switch */ \
} \
/* remember to handle the left element */ \
if ((threadIdx.x == 0) && !isEvenSize) \
{ \
switch (reduction_op){ \
case XOMP_REDUCTION_PLUS: \
case XOMP_REDUCTION_MINUS: \
sdata_##dtype[0]+= sdata_##dtype[2*offset]; \
break; \
/* TODO add more operation support */ \
default: \
{ \
/* TODO: add assertion or set cudaError with an error code */ \
/* cannot call a host function */ \
/* fprintf (stderr, "Error. xomp_inner_block_reduction() unhandled reduction operation:%d\n",reduction_op); */ \
/* assert (false); */ \
} \
} /* end switch */ \
} \
isEvenSize = ( offset % 2 ==0); /* prepare next round*/ \
/* MUST wait until all threads in the block have updated their partial sums */ \
__syncthreads(); /* sync after each folding */ \
} \
/* thread 0 writes the final result to the partial sum of this thread block */ \
if(threadIdx.x == 0) \
{ \
grid_level_results[blockIdx.x] = sdata_##dtype[0]; \
} \
}
XOMP_INNER_BLOCK_REDUCTION_DEF(int)
XOMP_INNER_BLOCK_REDUCTION_DEF(float)
XOMP_INNER_BLOCK_REDUCTION_DEF(double)
#undef XOMP_INNER_BLOCK_REDUCTION_DEF
#endif
// TODO: handle more different reduction operations
// TODO : add assertion support
#define XOMP_BEYOND_BLOCK_REDUCTION_DEF(dtype) \
dtype xomp_beyond_block_reduction_##dtype(dtype * per_block_results, int numBlocks, int reduction_op) \
{ \
dtype result ; \
dtype* per_block_results_cpu = (dtype *)xomp_hostMalloc (numBlocks*sizeof(dtype)); \
xomp_memcpyDeviceToHost (per_block_results_cpu, per_block_results, sizeof(dtype)* numBlocks); \
int r_i; \
for (r_i =1; r_i < numBlocks; r_i++) \
{ \
switch (reduction_op){ \
case XOMP_REDUCTION_PLUS: \
case XOMP_REDUCTION_MINUS: \
per_block_results_cpu[0]+= per_block_results_cpu[r_i]; \
break; \
default: \
{ \
} \
} \
} \
result = per_block_results_cpu[0]; \
xomp_freeHost(per_block_results_cpu); \
return result; \
}
//TODO define more types of CPU level reduction support
XOMP_BEYOND_BLOCK_REDUCTION_DEF(int)
XOMP_BEYOND_BLOCK_REDUCTION_DEF(float)
XOMP_BEYOND_BLOCK_REDUCTION_DEF(double)
#undef XOMP_BEYOND_BLOCK_REDUCTION_DEF
/* some of the ompacc runtime API */
#if 0
int omp_get_num_devices() {
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
return deviceCount;
}
#endif
//! A helper function to copy a mapped variable from src to desc
void copy_mapped_variable (struct XOMP_mapped_variable* desc, struct XOMP_mapped_variable* src)
{
assert (src != NULL);
assert (desc != NULL);
desc-> size = (int*)malloc(sizeof(int) * src->nDim);
desc-> offset = (int*)malloc(sizeof(int) * src->nDim);
desc-> DimSize = (int*)malloc(sizeof(int) * src->nDim);
desc->nDim = src->nDim;
desc->typeSize = src->typeSize;
desc->address = src->address;
int i;
for(i = 0; i < src->nDim; ++i)
{
desc->size[i]= src->size[i];
desc->offset[i]= src->offset[i];
desc->DimSize[i]= src->DimSize[i];
}
desc->dev_address = src ->dev_address;
  // we do not want to inherit the copy directions or map-type of the parent DDE's variable:
  // OpenMP 4.0's rule is to reuse the enclosing data environment and discard the map-type.
//desc->copyFrom= src ->copyFrom;
}
// create a new DDE-data node and
// append it to the end of the tracking list, and
// copy all variables from its parent node to be into the set of inherited variable set.
void xomp_deviceDataEnvironmentEnter(int devID)
{
// create a new DDE node and initialize it
DDE * data = (DDE *) malloc (sizeof (DDE));
assert (data!=NULL);
data->new_variable_count = 0;
data->inherited_variable_count = 0;
data->parent = NULL;
data->child= NULL;
data->devID= devID;
// For simplicity, we pre-allocate the storage for the list of variables
// TODO: improve the efficiency
data->new_variables = (struct XOMP_mapped_variable*) malloc (XOMP_MAX_MAPPED_VARS * sizeof (struct XOMP_mapped_variable));
data->inherited_variables = (struct XOMP_mapped_variable*) malloc (XOMP_MAX_MAPPED_VARS * sizeof (struct XOMP_mapped_variable));
// Append the data to the list
// Case 1: empty list, add as the first node, nothing else to do
if (DDE_tail[devID] == NULL)
{
assert (DDE_head[devID] == NULL );
DDE_head[devID] = data;
DDE_tail[devID] = data;
return;
}
// Case 2: non-empty list
// create double links
data->parent = DDE_tail[devID];
DDE_tail[devID]->child = data;
// shift the tail
DDE_tail[devID] = data;
// copy all variables from its parent node into the inherited variable set.
// Both new and inherited variables of the parent node become inherited for the current node
data->inherited_variable_count = data->parent->new_variable_count + data->parent->inherited_variable_count;
data->inherited_variables = (struct XOMP_mapped_variable*) malloc (data->inherited_variable_count * sizeof (struct XOMP_mapped_variable));
assert (data->inherited_variables != NULL);
int i;
int offset = 0;
for (i = 0; i < data->parent->new_variable_count; i++)
{
struct XOMP_mapped_variable* dest_element = data->inherited_variables + offset;
DDE* p = data->parent;
struct XOMP_mapped_variable* src_element = p->new_variables + i;
copy_mapped_variable(dest_element, src_element);
offset ++;
}
for (i = 0; i < data->parent->inherited_variable_count; i++)
{
//copy_mapped_variable(&((data->inherited_variables)[offset]), &( (data->parent->inherited_variables)[i]));
copy_mapped_variable( (struct XOMP_mapped_variable*) (data->inherited_variables + offset), (struct XOMP_mapped_variable*) (data->parent->inherited_variables + i));
offset ++;
}
assert (offset == data->inherited_variable_count);
}
// Check if an original variable is already mapped in enclosing data environment, return its device variable's address if yes.
// return NULL if not
void* xomp_deviceDataEnvironmentGetInheritedVariable (int devID, void* orig_var, int typeSize, int* size)
{
void * dev_address = NULL;
assert (orig_var != NULL);
int i;
// At this point, DDE list should not be empty
// At least a call to XOMP_Device_Data_Environment_Enter() should have finished before
assert ( DDE_tail[devID] != NULL );
for (i = 0; i < DDE_tail[devID]->inherited_variable_count; i++)
{
struct XOMP_mapped_variable* cur_var = DDE_tail[devID]->inherited_variables + i;
if (cur_var->address == orig_var)
{
dev_address = cur_var-> dev_address;
int i;
int matched = 1;
for(i=0; i < cur_var->nDim; ++i)
{
if(cur_var->size[i]*typeSize != size[i]*typeSize)
matched = 0;
}
if(matched)
break;
}
}
return dev_address;
}
//! Add a newly mapped variable into the current DDE's new variable list
void xomp_deviceDataEnvironmentAddVariable (int devID, void* var_addr, int* var_size, int* var_offset, int* var_dim, int nDim, int typeSize, void * dev_addr, bool copyTo, bool copyFrom)
{
  // TODO: sanity check to avoid adding a duplicated or already-inherited variable
assert ( DDE_tail[devID] != NULL );
struct XOMP_mapped_variable* mapped_var = DDE_tail[devID]->new_variables + DDE_tail[devID]->new_variable_count ;
mapped_var-> address = var_addr;
mapped_var-> size = (int*)malloc(sizeof(int) * nDim);
mapped_var-> offset = (int*)malloc(sizeof(int) * nDim);
mapped_var-> DimSize = (int*)malloc(sizeof(int) * nDim);
mapped_var->nDim = nDim;
mapped_var->typeSize = typeSize;
int i;
for(i = 0; i < nDim; ++i)
{
mapped_var-> size[i] = var_size[i];
mapped_var-> offset[i] = var_offset[i];
mapped_var-> DimSize[i] = var_dim[i];
}
mapped_var-> dev_address = dev_addr;
mapped_var-> copyTo= copyTo;
mapped_var-> copyFrom= copyFrom;
// now move up the offset
DDE_tail[devID]->new_variable_count ++;
}
void xomp_memGatherDeviceToHost(void* dest, void* src, int* vsize, int* voffset, int* vDimSize, int ndim, int typeSize)
{
int offset_src;
int offset_dest;
assert (ndim <= 3);
if(ndim == 1)
{
xomp_memcpyDeviceToHost((char*)dest+voffset[0]*typeSize, (char*)src, vsize[0]*typeSize);
}
else if(ndim == 2)
{
// vsize[1] stores the fastest-access dimension
int j;
for(j=0; j < vsize[0]; ++j)
{
offset_dest = voffset[1] + (j + voffset[0]) * vDimSize[1];
offset_src = j * vsize[1];
xomp_memcpyDeviceToHost((char*)dest+offset_dest*typeSize, (char*)src+offset_src*typeSize, vsize[1]*typeSize);
}
}
else if(ndim == 3)
{
int i,j;
for(j=0; j < vsize[2]; ++j)
{
offset_dest = voffset[0] + vDimSize[0]*( voffset[1] + vDimSize[1] * (j + voffset[2])) - vDimSize[0];
offset_src = vsize[1] * (j * vsize[2]) - vsize[0];
for(i=0; i < vsize[1]; ++i)
{
offset_dest += vDimSize[0];
offset_src += vsize[0];
xomp_memcpyDeviceToHost((char*)dest+offset_dest*typeSize, (char*)src+offset_src*typeSize, vsize[0]*typeSize);
}
}
}
}
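// For the 2-D case above: src is a packed vsize[0] x vsize[1] device tile, and
// each of its vsize[0] rows is copied into the host array `dest` (whose row
// stride is vDimSize[1]) starting at element (voffset[0], voffset[1]).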
void xomp_memScatterHostToDevice(void* dest, void* src, int* vsize, int* voffset, int* vDimSize, int ndim, int typeSize)
{
int offset_src;
int offset_dest;
assert (ndim <= 3);
if(ndim == 1)
{
xomp_memcpyHostToDevice((char*)dest, (char*)src+voffset[0]*typeSize, vsize[0]*typeSize);
}
else if(ndim == 2)
{
int j;
for(j=0; j < vsize[0]; ++j)
{
offset_src = voffset[1] + (j + voffset[0]) * vDimSize[1];
offset_dest = j * vsize[1];
xomp_memcpyHostToDevice((char*)dest+offset_dest*typeSize, (char*)src+offset_src*typeSize, vsize[1]*typeSize);
}
}
else if(ndim == 3)
{
int i,j;
for(j=0; j < vsize[2]; ++j)
{
//offset_src = voffset[0] + vDimSize[0]*( voffset[1] + vDimSize[1] * (j + voffset[2]) -1);
offset_src = (j+voffset[2])*vDimSize[0]*vDimSize[1] + voffset[1]*vDimSize[0] + voffset[0] - vDimSize[0];
offset_dest = j * vsize[1] * vsize[2] - vsize[0];
for(i=0; i < vsize[1]; ++i)
{
offset_src += vDimSize[0];
offset_dest += vsize[0];
xomp_memcpyHostToDevice((char*)dest+offset_dest*typeSize, (char*)src+offset_src*typeSize, vsize[0]*typeSize);
}
}
}
}
// All-in-one function to prepare device variable
void* xomp_deviceDataEnvironmentPrepareVariable(int devID, void* original_variable_address, int nDim, int typeSize, int* vsize, int* voffset, int* vDimSize, bool copy_into, bool copy_back)
{
// currently only handle one dimension
void* dev_var_address = NULL;
dev_var_address = xomp_deviceDataEnvironmentGetInheritedVariable (devID, original_variable_address, typeSize, vsize);
if (dev_var_address == NULL)
{
int devSize = 1;
for(int i=0; i < nDim; ++i)
{
devSize *= vsize[i];
}
dev_var_address = xomp_deviceMalloc(devSize*typeSize);
xomp_deviceDataEnvironmentAddVariable (devID, original_variable_address, vsize, voffset, vDimSize, nDim, typeSize, dev_var_address, copy_into, copy_back);
// The spec says : reuse enclosing data and discard map-type rule.
// So map-type only matters when no-reuse happens
if (copy_into)
{
xomp_memScatterHostToDevice(dev_var_address, original_variable_address, vsize, voffset, vDimSize, nDim, typeSize);
// xomp_memcpyHostToDevice(dev_var_address, original_variable_address, vsize[0]);
}
}
assert (dev_var_address != NULL);
return dev_var_address;
}
// Exit current DDE: copy back values if specified, deallocate memory, delete the DDE-data node from the end of the tracking list
void xomp_deviceDataEnvironmentExit(int devID)
{
assert ( DDE_tail[devID] != NULL );
// Deallocate mapped device variables which are allocated by this current DDE
// Optionally copy the value back to host if specified.
int i;
for (i = 0; i < DDE_tail[devID]->new_variable_count; i++)
{
struct XOMP_mapped_variable* mapped_var = DDE_tail[devID]->new_variables + i;
void * dev_address = mapped_var->dev_address;
if (mapped_var->copyFrom)
{
xomp_memGatherDeviceToHost(((void *)((char*)mapped_var->address)),((void *)((char *)mapped_var->dev_address)), mapped_var->size,mapped_var->offset,mapped_var->DimSize, mapped_var->nDim,mapped_var->typeSize);
//xomp_memcpyDeviceToHost(((void *)((char*)mapped_var->address+mapped_var->offset[0])),((const void *)mapped_var->dev_address), mapped_var->size[0]);
}
// free after copy back!!
xomp_freeDevice (dev_address); //TODO Will this work without type info? Looks so!
}
// Deallocate pre-allocated variable lists
free (DDE_tail[devID]->new_variables);
free (DDE_tail[devID]->inherited_variables);
// Delete the node from the tail
DDE * parent = DDE_tail[devID]->parent;
if (parent != NULL)
{
assert (DDE_tail[devID] == parent->child);
DDE_tail[devID] = parent;
free (parent->child);
parent->child = NULL;
}
else // last node in the list
{
free (DDE_tail[devID]);
DDE_head[devID] = NULL;
DDE_tail[devID] = NULL;
}
}
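/* Usage sketch (illustrative only): a typical device data environment for one
 * offloaded region.  `a` (a host float array) and `n` are made-up names.
 *
 *   int devID = 0;
 *   xomp_acc_init();
 *   xomp_deviceDataEnvironmentEnter(devID);
 *   int vsize[1]   = { n };
 *   int voffset[1] = { 0 };
 *   int vdim[1]    = { n };
 *   float* d_a = (float*) xomp_deviceDataEnvironmentPrepareVariable(
 *       devID, a, 1, sizeof(float), vsize, voffset, vdim,
 *       true, true);   // copy data in on entry, copy it back on exit
 *   // ... launch kernel(s) using d_a ...
 *   xomp_deviceDataEnvironmentExit(devID);   // copies back and frees d_a
 */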
#if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP ) )
// external functions
#ifdef __CUDACC__
#include "CUFLU_Shared_FluUtility.cu"
#include "CUFLU_Shared_DataReconstruction.cu"
#include "CUFLU_Shared_ComputeFlux.cu"
#include "CUFLU_Shared_FullStepUpdate.cu"
#ifdef MHD
#include "CUFLU_Shared_ConstrainedTransport.cu"
#endif
#if ( RSOLVER == EXACT )
# include "CUFLU_Shared_RiemannSolver_Exact.cu"
#elif ( RSOLVER == ROE )
# include "CUFLU_Shared_RiemannSolver_Roe.cu"
#elif ( RSOLVER == HLLE )
# include "CUFLU_Shared_RiemannSolver_HLLE.cu"
#elif ( RSOLVER == HLLC )
# include "CUFLU_Shared_RiemannSolver_HLLC.cu"
#elif ( RSOLVER == HLLD )
# include "CUFLU_Shared_RiemannSolver_HLLD.cu"
#endif
#include "CUDA_ConstMemory.h"
#else // #ifdef __CUDACC__
void Hydro_DataReconstruction( const real g_ConVar [][ CUBE(FLU_NXT) ],
const real g_FC_B [][ SQR(FLU_NXT)*FLU_NXT_P1 ],
real g_PriVar [][ CUBE(FLU_NXT) ],
real g_FC_Var [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_Slope_PPM[][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ],
const bool Con2Pri, const LR_Limiter_t LR_Limiter, const real MinMod_Coeff,
const real dt, const real dh,
const real MinDens, const real MinPres, const real MinEint,
const bool FracPassive, const int NFrac, const int FracIdx[],
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t *EoS );
void Hydro_ComputeFlux( const real g_FC_Var [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_FC_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
const int NFlux, const int NSkip_N, const int NSkip_T,
const bool CorrHalfVel, const real g_Pot_USG[], const double g_Corner[],
const real dt, const real dh, const double Time, const bool UsePot,
const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double ExtAcc_AuxArray[],
const real MinDens, const real MinPres, const bool DumpIntFlux, real g_IntFlux[][NCOMP_TOTAL][ SQR(PS2) ],
const EoS_t *EoS );
void Hydro_FullStepUpdate( const real g_Input[][ CUBE(FLU_NXT) ], real g_Output[][ CUBE(PS2) ], char g_DE_Status[],
const real g_FC_B[][ PS2P1*SQR(PS2) ], const real g_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
const real dt, const real dh, const real MinDens, const real MinEint,
const real DualEnergySwitch, const bool NormPassive, const int NNorm, const int NormIdx[],
const EoS_t *EoS );
#if ( RSOLVER == EXACT )
void Hydro_RiemannSolver_Exact( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[],
const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres,
const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[],
const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] );
#elif ( RSOLVER == ROE )
void Hydro_RiemannSolver_Roe( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[],
const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres,
const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[],
const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] );
#elif ( RSOLVER == HLLE )
void Hydro_RiemannSolver_HLLE( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[],
const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres,
const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[],
const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] );
#elif ( RSOLVER == HLLC )
void Hydro_RiemannSolver_HLLC( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[],
const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres,
const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[],
const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] );
#elif ( RSOLVER == HLLD )
void Hydro_RiemannSolver_HLLD( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[],
const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres,
const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[],
const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] );
#endif
#if ( FLU_SCHEME == MHM_RP )
void Hydro_Con2Pri( const real In[], real Out[], const real MinPres,
const bool FracPassive, const int NFrac, const int FracIdx[],
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2E_t EoS_DensPres2Eint,
const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[],
const real *const EoS_Table[EOS_NTABLE_MAX], real* const EintOut );
#ifdef MHD
void MHD_ComputeElectric( real g_EC_Ele[][ CUBE(N_EC_ELE) ],
const real g_FC_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
const real g_PriVar[][ CUBE(FLU_NXT) ],
const int NEle, const int NFlux, const int NPri, const int OffsetPri,
const real dt, const real dh,
const bool DumpIntEle, real g_IntEle[][NCOMP_ELE][ PS2P1*PS2 ],
const bool CorrHalfVel, const real g_Pot_USG[], const double g_Corner[], const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const double ExtAcc_AuxArray[] );
void MHD_UpdateMagnetic( real *g_FC_Bx_Out, real *g_FC_By_Out, real *g_FC_Bz_Out,
const real g_FC_B_In[][ FLU_NXT_P1*SQR(FLU_NXT) ],
const real g_EC_Ele[][ CUBE(N_EC_ELE) ],
const real dt, const real dh, const int NOut, const int NEle, const int Offset_B_In );
#endif // #ifdef MHD
#endif // #if ( FLU_SCHEME == MHM_RP )
#endif // #ifdef __CUDACC__ ... else ...
// internal functions
#if ( FLU_SCHEME == MHM_RP )
GPU_DEVICE
static void Hydro_RiemannPredict_Flux( const real g_ConVar[][ CUBE(FLU_NXT) ],
real g_Flux_Half[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
const real g_FC_B[][ SQR(FLU_NXT)*FLU_NXT_P1 ],
const real g_CC_B[][ CUBE(FLU_NXT) ],
const real MinDens, const real MinPres,
const EoS_t *EoS );
GPU_DEVICE
static void Hydro_RiemannPredict( const real g_ConVar_In[][ CUBE(FLU_NXT) ],
const real g_FC_B_Half[][ FLU_NXT_P1*SQR(FLU_NXT) ],
const real g_Flux_Half[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_PriVar_Half[][ CUBE(FLU_NXT) ],
const real dt, const real dh,
const real MinDens, const real MinPres, const real MinEint,
const bool FracPassive, const int NFrac, const int FracIdx[],
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t *EoS );
#endif
//-------------------------------------------------------------------------------------------------------
// Function : CPU/CUFLU_FluidSolver_MHM
// Description : CPU/GPU fluid solver based on the MUSCL-Hancock scheme
//
// Note : 1. The three-dimensional evolution is achieved by using the unsplit method
// 2. Two half-step prediction schemes are supported, including "MHM" and "MHM_RP"
// MHM : use interpolated face-centered values to calculate the half-step fluxes
// MHM_RP : use Riemann solver to calculate the half-step fluxes
// 3. Ref :
// MHM : "Riemann Solvers and Numerical Methods for Fluid Dynamics
// - A Practical Introduction ~ by Eleuterio F. Toro"
// MHM_RP : Stone & Gardiner, NewA, 14, 139 (2009)
// 4. See include/CUFLU.h for the values and description of different symbolic constants
// such as N_FC_VAR, N_FC_FLUX, N_SLOPE_PPM, N_FL_FLUX, N_HF_VAR
// 5. Arrays with a prefix "g_" are stored in the global memory of GPU
//
// Parameter : g_Flu_Array_In : Array storing the input fluid variables
// g_Flu_Array_Out : Array to store the output fluid variables
// g_Mag_Array_In : Array storing the input B field (for MHD only)
// g_Mag_Array_Out : Array to store the output B field (for MHD only)
// g_DE_Array_Out : Array to store the dual-energy status
// g_Flux_Array : Array to store the output fluxes
// g_Ele_Array : Array to store the output electric field (for MHD only)
// g_Corner_Array : Array storing the physical corner coordinates of each patch group (for UNSPLIT_GRAVITY)
// g_Pot_Array_USG : Array storing the input potential for UNSPLIT_GRAVITY
// g_PriVar : Array to store the primitive variables
// g_Slope_PPM : Array to store the slope for the PPM reconstruction
// g_FC_Var : Array to store the half-step variables
// g_FC_Flux : Array to store the face-centered fluxes
// g_FC_Mag_Half : Array to store the half-step B field (for MHD only)
// g_EC_Ele : Array to store the edge-centered electric field (for MHD only)
// NPatchGroup : Number of patch groups to be evaluated
// dt : Time interval to advance solution
// dh : Cell size
// StoreFlux : true --> store the coarse-fine fluxes
// StoreElectric : true --> store the coarse-fine electric field
// LR_Limiter : Slope limiter for the data reconstruction in the MHM/MHM_RP/CTU schemes
// (0/1/2/3/4) = (vanLeer/generalized MinMod/vanAlbada/
// vanLeer + generalized MinMod/extrema-preserving) limiter
// MinMod_Coeff : Coefficient of the generalized MinMod limiter
// Time : Current physical time (for UNSPLIT_GRAVITY only)
// UsePot : Add self-gravity and/or external potential (for UNSPLIT_GRAVITY only)
// ExtAcc : Add external acceleration (for UNSPLIT_GRAVITY only)
// ExtAcc_Func : Function pointer to the external acceleration routine (for UNSPLIT_GRAVITY only)
// c_ExtAcc_AuxArray : Auxiliary array for adding external acceleration (for UNSPLIT_GRAVITY and CPU only)
// --> When using GPU, this array is stored in the constant memory header
// CUDA_ConstMemory.h and does not need to be passed as a function argument
// MinDens/Pres/Eint : Density, pressure, and internal energy floors
// DualEnergySwitch : Use the dual-energy formalism if E_int/E_kin < DualEnergySwitch
// NormPassive : true --> normalize passive scalars so that the sum of their mass density
// is equal to the gas mass density
// NNorm : Number of passive scalars to be normalized
// --> Should be set to the global variable "PassiveNorm_NVar"
// c_NormIdx : Target variable indices to be normalized
// --> Should be set to the global variable "PassiveNorm_VarIdx"
// --> When using GPU, this array is stored in the constant memory and does
// not need to be passed as a function argument
// --> Declared in CUDA_ConstMemory.h with the prefix "c_" to
// highlight that this is a constant variable on GPU
// FracPassive : true --> convert passive scalars to mass fraction during data reconstruction
// NFrac : Number of passive scalars for the option "FracPassive"
// --> Should be set to the global variable "PassiveIntFrac_NVar"
// c_FracIdx : Target variable indices for the option "FracPassive"
// --> Should be set to the global variable "PassiveIntFrac_VarIdx"
// --> When using GPU, this array is stored in the constant memory and does
// not need to be passed as a function argument
// --> Declared in CUDA_ConstMemory.h with the prefix "c_" to
// highlight that this is a constant variable on GPU
// JeansMinPres : Apply minimum pressure estimated from the Jeans length
// JeansMinPres_Coeff : Coefficient used by JeansMinPres = G*(Jeans_NCell*Jeans_dh)^2/(Gamma*pi);
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
#ifdef __CUDACC__
__global__
void CUFLU_FluidSolver_MHM(
const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char g_DE_Array_Out [][ CUBE(PS2) ],
real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ],
const double g_Corner_Array [][3],
const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ],
real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ],
real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ],
const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm,
const bool FracPassive, const int NFrac,
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t EoS )
#else
void CPU_FluidSolver_MHM(
const real g_Flu_Array_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Flu_Array_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
const real g_Mag_Array_In [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_Mag_Array_Out[][NCOMP_MAG][ PS2P1*SQR(PS2) ],
char g_DE_Array_Out [][ CUBE(PS2) ],
real g_Flux_Array [][9][NCOMP_TOTAL][ SQR(PS2) ],
real g_Ele_Array [][9][NCOMP_ELE][ PS2P1*PS2 ],
const double g_Corner_Array [][3],
const real g_Pot_Array_USG[][ CUBE(USG_NXT_F) ],
real g_PriVar [] [NCOMP_LR ][ CUBE(FLU_NXT) ],
real g_Slope_PPM [][3][NCOMP_LR ][ CUBE(N_SLOPE_PPM) ],
real g_FC_Var [][6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ],
real g_FC_Flux [][3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_FC_Mag_Half [][NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ],
real g_EC_Ele [][NCOMP_MAG][ CUBE(N_EC_ELE) ],
const int NPatchGroup,
const real dt, const real dh,
const bool StoreFlux, const bool StoreElectric,
const LR_Limiter_t LR_Limiter, const real MinMod_Coeff, const double Time,
const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
const double c_ExtAcc_AuxArray[],
const real MinDens, const real MinPres, const real MinEint,
const real DualEnergySwitch,
const bool NormPassive, const int NNorm, const int c_NormIdx[],
const bool FracPassive, const int NFrac, const int c_FracIdx[],
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t EoS )
#endif // #ifdef __CUDACC__ ... else ...
{
# ifdef UNSPLIT_GRAVITY
const bool CorrHalfVel = true;
# else
const bool CorrHalfVel = false;
# endif
# if ( FLU_SCHEME == MHM )
const bool Con2Pri_Yes = true;
# elif ( FLU_SCHEME == MHM_RP )
const bool Con2Pri_No = false;
# endif
# ifdef MHD
const bool CorrHalfVel_No = false;
const bool StoreElectric_No = false;
# endif
# if ( defined __CUDACC__ && !defined GRAVITY )
const double *c_ExtAcc_AuxArray = NULL;
# endif
// openmp pragma for the CPU solver
# ifndef __CUDACC__
# pragma omp parallel
# endif
{
// point to the arrays associated with different OpenMP threads (for CPU) or CUDA thread blocks (for GPU)
# ifdef __CUDACC__
const int array_idx = blockIdx.x;
# else
# ifdef OPENMP
const int array_idx = omp_get_thread_num();
# else
const int array_idx = 0;
# endif
# endif // #ifdef __CUDACC__ ... else ...
real (*const g_FC_Var_1PG )[NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ] = g_FC_Var [array_idx];
real (*const g_FC_Flux_1PG )[NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ] = g_FC_Flux [array_idx];
real (*const g_PriVar_1PG ) [ CUBE(FLU_NXT) ] = g_PriVar [array_idx];
real (*const g_Slope_PPM_1PG)[NCOMP_LR ][ CUBE(N_SLOPE_PPM) ] = g_Slope_PPM[array_idx];
# if ( FLU_SCHEME == MHM_RP )
real (*const g_Flux_Half_1PG)[NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ] = g_FC_Flux_1PG;
real (*const g_PriVar_Half_1PG ) [ CUBE(FLU_NXT) ] = g_PriVar_1PG;
# ifdef MHD
real (*const g_FC_Mag_Half_1PG)[ FLU_NXT_P1*SQR(FLU_NXT) ] = g_FC_Mag_Half[array_idx];
real (*const g_EC_Ele_1PG )[ CUBE(N_EC_ELE) ] = g_EC_Ele [array_idx];
# else
real (*const g_FC_Mag_Half_1PG)[ FLU_NXT_P1*SQR(FLU_NXT) ] = NULL;
# endif
# endif // if ( FLU_SCHEME == MHM_RP )
// loop over all patch groups
// --> CPU/GPU solver: use different (OpenMP threads) / (CUDA thread blocks)
// to work on different patch groups
# ifdef __CUDACC__
const int P = blockIdx.x;
# else
# pragma omp for schedule( runtime )
for (int P=0; P<NPatchGroup; P++)
# endif
{
// 1. half-step prediction
// 1-a. MHM_RP: use Riemann solver to calculate the half-step fluxes
# if ( FLU_SCHEME == MHM_RP )
# ifdef MHD
// 1-a-1. evaluate the cell-centered B field and store in g_PriVar[]
// --> also copy density and compute velocity for MHD_ComputeElectric()
real CC_B[NCOMP_MAG];
CGPU_LOOP( idx, CUBE(FLU_NXT) )
{
const int size_ij = SQR( FLU_NXT );
const int i = idx % FLU_NXT;
const int j = idx % size_ij / FLU_NXT;
const int k = idx / size_ij;
// density and velocity
const real Dens = g_Flu_Array_In[P][0][idx];
const real _Dens = (real)1.0/Dens;
g_PriVar_1PG[0][idx] = Dens;
g_PriVar_1PG[1][idx] = g_Flu_Array_In[P][1][idx]*_Dens;
g_PriVar_1PG[2][idx] = g_Flu_Array_In[P][2][idx]*_Dens;
g_PriVar_1PG[3][idx] = g_Flu_Array_In[P][3][idx]*_Dens;
// magnetic field
MHD_GetCellCenteredBField( CC_B, g_Mag_Array_In[P][0], g_Mag_Array_In[P][1], g_Mag_Array_In[P][2],
FLU_NXT, FLU_NXT, FLU_NXT, i, j, k );
for (int v=0; v<NCOMP_MAG; v++) g_PriVar_1PG[ MAG_OFFSET + v ][idx] = CC_B[v];
}
# ifdef __CUDACC__
__syncthreads();
# endif
# endif // #ifdef MHD
// 1-a-2. evaluate the half-step first-order fluxes by Riemann solver
Hydro_RiemannPredict_Flux( g_Flu_Array_In[P], g_Flux_Half_1PG, g_Mag_Array_In[P], g_PriVar_1PG+MAG_OFFSET,
MinDens, MinPres, &EoS );
// 1-a-3. evaluate electric field and update B field at the half time-step
# ifdef MHD
MHD_ComputeElectric( g_EC_Ele_1PG, g_Flux_Half_1PG, g_PriVar_1PG, N_HF_ELE, N_HF_FLUX,
FLU_NXT, 0, dt, dh, StoreElectric_No, NULL,
CorrHalfVel_No, NULL, NULL, NULL_REAL,
EXT_POT_NONE, EXT_ACC_NONE, NULL, NULL );
MHD_UpdateMagnetic( g_FC_Mag_Half_1PG[0], g_FC_Mag_Half_1PG[1], g_FC_Mag_Half_1PG[2],
g_Mag_Array_In[P], g_EC_Ele_1PG, (real)0.5*dt, dh, N_HF_VAR, N_HF_ELE, 1 );
# endif
// 1-a-4. evaluate the half-step solutions
Hydro_RiemannPredict( g_Flu_Array_In[P], g_FC_Mag_Half_1PG, g_Flux_Half_1PG, g_PriVar_Half_1PG,
dt, dh, MinDens, MinPres, MinEint, FracPassive, NFrac, c_FracIdx,
JeansMinPres, JeansMinPres_Coeff, &EoS );
// 1-a-5. evaluate the face-centered values by data reconstruction
// --> note that g_PriVar_Half_1PG[] returned by Hydro_RiemannPredict() stores the primitive variables
Hydro_DataReconstruction( NULL, g_FC_Mag_Half_1PG, g_PriVar_Half_1PG, g_FC_Var_1PG, g_Slope_PPM_1PG,
Con2Pri_No, LR_Limiter, MinMod_Coeff, dt, dh,
MinDens, MinPres, MinEint, FracPassive, NFrac, c_FracIdx,
JeansMinPres, JeansMinPres_Coeff, &EoS );
// 1-b. MHM: use interpolated face-centered values to calculate the half-step fluxes
# elif ( FLU_SCHEME == MHM )
// evaluate the face-centered values by data reconstruction
Hydro_DataReconstruction( g_Flu_Array_In[P], NULL, g_PriVar_1PG, g_FC_Var_1PG, g_Slope_PPM_1PG,
Con2Pri_Yes, LR_Limiter, MinMod_Coeff, dt, dh,
MinDens, MinPres, MinEint, FracPassive, NFrac, c_FracIdx,
JeansMinPres, JeansMinPres_Coeff, &EoS );
# endif // #if ( FLU_SCHEME == MHM_RP ) ... else ...
// 2. evaluate the full-step fluxes
# ifdef MHD
const int NSkip_N = 0;
const int NSkip_T = 0;
# else
const int NSkip_N = 0;
const int NSkip_T = 1;
# endif
Hydro_ComputeFlux( g_FC_Var_1PG, g_FC_Flux_1PG, N_FL_FLUX, NSkip_N, NSkip_T,
CorrHalfVel, g_Pot_Array_USG[P], g_Corner_Array[P],
dt, dh, Time, UsePot, ExtAcc, ExtAcc_Func, c_ExtAcc_AuxArray,
MinDens, MinPres, StoreFlux, g_Flux_Array[P], &EoS );
// 3. evaluate electric field and update B field at the full time-step
// --> must update B field before Hydro_FullStepUpdate() since the latter requires
// the updated magnetic energy when adopting the dual-energy formalism
# ifdef MHD
MHD_ComputeElectric( g_EC_Ele_1PG, g_FC_Flux_1PG, g_PriVar_Half_1PG, N_FL_ELE, N_FL_FLUX,
N_HF_VAR, LR_GHOST_SIZE, dt, dh, StoreElectric, g_Ele_Array[P],
CorrHalfVel, g_Pot_Array_USG[P], g_Corner_Array[P], Time,
UsePot, ExtAcc, ExtAcc_Func, c_ExtAcc_AuxArray );
MHD_UpdateMagnetic( g_Mag_Array_Out[P][0], g_Mag_Array_Out[P][1], g_Mag_Array_Out[P][2],
g_Mag_Array_In[P], g_EC_Ele_1PG, dt, dh, PS2, N_FL_ELE, FLU_GHOST_SIZE );
# endif
// 4. full-step evolution
Hydro_FullStepUpdate( g_Flu_Array_In[P], g_Flu_Array_Out[P], g_DE_Array_Out[P], g_Mag_Array_Out[P],
g_FC_Flux_1PG, dt, dh, MinDens, MinEint, DualEnergySwitch,
NormPassive, NNorm, c_NormIdx, &EoS );
} // loop over all patch groups
} // OpenMP parallel region
} // FUNCTION : CPU_FluidSolver_MHM
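// Illustrative sketch (not GAMER code): a minimal 1-D, scalar-advection version of the
// MUSCL-Hancock predictor-corrector structure implemented above for full 3-D (M)HD.
// The minmod limiter, the upwind flux F(u)=a*u, and all names are illustrative only.
#include <vector>
#include <cmath>
static void MusclHancock1D_Sketch( std::vector<double>& u, const double a,
                                   const double dt, const double dh )
{
   const int N = (int)u.size();
   auto minmod = []( double x, double y ) {
      return ( x*y <= 0.0 ) ? 0.0 : ( std::fabs(x) < std::fabs(y) ? x : y );
   };

// 1. data reconstruction: limited slope and left/right face values of each cell
   std::vector<double> uL(N, 0.0), uR(N, 0.0);
   for (int i=1; i<N-1; i++) {
      const double s = minmod( u[i]-u[i-1], u[i+1]-u[i] );
      uL[i] = u[i] - 0.5*s;          // left  face of cell i
      uR[i] = u[i] + 0.5*s;          // right face of cell i
   }

// 2. half-step prediction: advance the face values by the flux difference across the cell
   for (int i=1; i<N-1; i++) {
      const double dF = a*uR[i] - a*uL[i];
      uL[i] -= 0.5*dt/dh*dF;
      uR[i] -= 0.5*dt/dh*dF;
   }

// 3. full-step fluxes: upwind flux at each interface i+1/2
   std::vector<double> flux(N, 0.0);
   for (int i=1; i<N-2; i++)
      flux[i] = ( a > 0.0 ) ? a*uR[i] : a*uL[i+1];

// 4. full-step update (interior cells only)
   std::vector<double> u_new(u);
   for (int i=2; i<N-2; i++)
      u_new[i] = u[i] - dt/dh*( flux[i] - flux[i-1] );
   u = u_new;
}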
#if ( FLU_SCHEME == MHM_RP )
//-------------------------------------------------------------------------------------------------------
// Function : Hydro_RiemannPredict_Flux
// Description : Evaluate the half-step face-centered fluxes by Riemann solver
//
// Note : 1. Work for the MHM_RP scheme
// 2. Currently support the exact, HLLC, HLLE, HLLD, and Roe solvers
// 3. g_Flux_Half[] is accessed with a stride N_HF_FLUX
// --> Fluxes on the **left** face of the (i+1,j+1,k+1) element in g_ConVar[] will
// be stored in the (i,j,k) element of g_Flux_Half[]
//
// Parameter : g_ConVar : Array storing the input conserved variables
// g_Flux_Half : Array to store the output face-centered fluxes
// g_FC_B : Array storing the input face-centered magnetic field (for MHD only)
// --> Accessed with strides FLU_NXT/FLU_NXT+1 along the
// transverse/longitudinal directions
// g_CC_B : Array storing the input cell-centered magnetic field (for MHD only)
// --> Accessed with a stride FLU_NXT
// MinDens/Pres : Density and pressure floors
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE
void Hydro_RiemannPredict_Flux( const real g_ConVar[][ CUBE(FLU_NXT) ],
real g_Flux_Half[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
const real g_FC_B[][ SQR(FLU_NXT)*FLU_NXT_P1 ],
const real g_CC_B[][ CUBE(FLU_NXT) ],
const real MinDens, const real MinPres,
const EoS_t *EoS )
{
const int didx_cvar[3] = { 1, FLU_NXT, SQR(FLU_NXT) };
real ConVar_L[NCOMP_TOTAL_PLUS_MAG], ConVar_R[NCOMP_TOTAL_PLUS_MAG], Flux_1Face[NCOMP_TOTAL_PLUS_MAG];
// loop over different spatial directions
for (int d=0; d<3; d++)
{
# ifdef MHD
const int TDir1 = (d+1)%3; // transverse direction 1
const int TDir2 = (d+2)%3; // transverse direction 2
const int stride_fc_B[3] = { 1, FLU_NXT, SQR(FLU_NXT) };
int sizeB_i, sizeB_j;
# endif
int i_cvar_s=0, j_cvar_s=0, k_cvar_s=0, size_i, size_j, size_k;
switch ( d )
{
# ifdef MHD
case 0 : size_i = N_HF_FLUX-1; size_j = N_HF_FLUX-0; size_k = N_HF_FLUX-0;
sizeB_i = FLU_NXT_P1; sizeB_j = FLU_NXT;
break;
case 1 : size_i = N_HF_FLUX-0; size_j = N_HF_FLUX-1; size_k = N_HF_FLUX-0;
sizeB_i = FLU_NXT; sizeB_j = FLU_NXT_P1;
break;
case 2 : size_i = N_HF_FLUX-0; size_j = N_HF_FLUX-0; size_k = N_HF_FLUX-1;
sizeB_i = FLU_NXT; sizeB_j = FLU_NXT;
break;
# else // #ifdef MHD
case 0 : i_cvar_s = 0; j_cvar_s = 1; k_cvar_s = 1;
size_i = N_HF_FLUX-0; size_j = N_HF_FLUX-1; size_k = N_HF_FLUX-1;
break;
case 1 : i_cvar_s = 1; j_cvar_s = 0; k_cvar_s = 1;
size_i = N_HF_FLUX-1; size_j = N_HF_FLUX-0; size_k = N_HF_FLUX-1;
break;
case 2 : i_cvar_s = 1; j_cvar_s = 1; k_cvar_s = 0;
size_i = N_HF_FLUX-1; size_j = N_HF_FLUX-1; size_k = N_HF_FLUX-0;
break;
# endif // #ifdef MHD ... else ...
} // switch ( d )
const int size_ij = size_i*size_j;
CGPU_LOOP( idx, size_i*size_j*size_k )
{
const int i_flux = idx % size_i;
const int j_flux = idx % size_ij / size_i;
const int k_flux = idx / size_ij;
const int idx_flux = IDX321( i_flux, j_flux, k_flux, N_HF_FLUX, N_HF_FLUX );
const int i_cvar = i_flux + i_cvar_s;
const int j_cvar = j_flux + j_cvar_s;
const int k_cvar = k_flux + k_cvar_s;
const int idx_cvar = IDX321( i_cvar, j_cvar, k_cvar, FLU_NXT, FLU_NXT );
// get the left and right fluid variables
for (int v=0; v<NCOMP_TOTAL; v++)
{
ConVar_L[v] = g_ConVar[v][ idx_cvar ];
ConVar_R[v] = g_ConVar[v][ idx_cvar + didx_cvar[d] ];
}
// get the left and right B field
# ifdef MHD
// longitudinal component is face-centered
const int idx_fc_B = IDX321( i_cvar, j_cvar, k_cvar, sizeB_i, sizeB_j ) + stride_fc_B[d];
ConVar_L[ MAG_OFFSET + d ] = g_FC_B[d][idx_fc_B];
ConVar_R[ MAG_OFFSET + d ] = g_FC_B[d][idx_fc_B];
// transverse components are cell-centered
ConVar_L[ MAG_OFFSET + TDir1 ] = g_CC_B[TDir1][ idx_cvar ];
ConVar_L[ MAG_OFFSET + TDir2 ] = g_CC_B[TDir2][ idx_cvar ];
ConVar_R[ MAG_OFFSET + TDir1 ] = g_CC_B[TDir1][ idx_cvar + didx_cvar[d] ];
ConVar_R[ MAG_OFFSET + TDir2 ] = g_CC_B[TDir2][ idx_cvar + didx_cvar[d] ];
// correct total energy by the difference between the face- and cell-centered longitudinal B field
ConVar_L[4] += (real)0.5*( SQR( ConVar_L[MAG_OFFSET + d] ) - SQR( g_CC_B[d][idx_cvar ] ) );
ConVar_R[4] += (real)0.5*( SQR( ConVar_R[MAG_OFFSET + d] ) - SQR( g_CC_B[d][idx_cvar + didx_cvar[d]] ) );
# endif
// invoke the Riemann solver
# if ( RSOLVER == EXACT && !defined MHD )
Hydro_RiemannSolver_Exact( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres,
EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table );
# elif ( RSOLVER == ROE )
Hydro_RiemannSolver_Roe ( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres,
EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table );
# elif ( RSOLVER == HLLE )
Hydro_RiemannSolver_HLLE ( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres,
EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table );
# elif ( RSOLVER == HLLC && !defined MHD )
Hydro_RiemannSolver_HLLC ( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres,
EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table );
# elif ( RSOLVER == HLLD && defined MHD )
Hydro_RiemannSolver_HLLD ( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres,
EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table );
# else
# error : ERROR : unsupported Riemann solver (EXACT/ROE/HLLE/HLLC/HLLD) !!
# endif
// store the results in g_Flux_Half[]
for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) g_Flux_Half[d][v][idx_flux] = Flux_1Face[v];
} // CGPU_LOOP( idx, size_i*size_j*size_k )
} // for (int d=0; d<3; d++)
# ifdef __CUDACC__
__syncthreads();
# endif
} // FUNCTION : Hydro_RiemannPredict_Flux
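// Illustrative sketch (not GAMER code): how the flattened 3-D indexing used by the
// CGPU_LOOP above is typically composed and decomposed. It assumes the conventional
// i-fastest layout IDX321(i,j,k,Ni,Nj) = i + Ni*( j + Nj*k ); the authoritative
// definition of IDX321 lives in the GAMER headers.
static inline int IDX321_Sketch( int i, int j, int k, int Ni, int Nj )
{
   return i + Ni*( j + Nj*k );
}
static inline void IDX321_Decompose_Sketch( int idx, int Ni, int Nj,
                                             int& i, int& j, int& k )
{
   const int size_ij = Ni*Nj;
   i = idx % Ni;            // matches "idx % size_i"         in the loop above
   j = idx % size_ij / Ni;  // matches "idx % size_ij / size_i"
   k = idx / size_ij;       // matches "idx / size_ij"
}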
//-------------------------------------------------------------------------------------------------------
// Function : Hydro_RiemannPredict
// Description : Evolve the cell-centered variables by half time-step using the fluxes returned
// by Hydro_RiemannPredict_Flux()
//
// Note : 1. Work for the MHM_RP scheme
// 2. For the performance consideration, the output data are converted to primitive variables
// --> Reducing the global memory access on GPU
// 3. Cell-centered B field is simply obtained by averaging the half-step face-centered B field
//
// Parameter : g_ConVar_In : Array storing the input conserved variables
// g_FC_B_Half : Array storing the input half-step face-centered B field
// g_Flux_Half : Array storing the input face-centered fluxes
// --> Accessed with the stride N_HF_FLUX
// g_PriVar_Half : Array to store the output primitive variables
// --> Accessed with the stride N_HF_VAR
// --> Note that its actual allocated size is FLU_NXT^3 since it points to g_PriVar_1PG[]
// dt : Time interval to advance solution
// dh : Cell size
// MinDens/Pres/Eint : Density, pressure, and internal energy floors
// FracPassive : true --> convert passive scalars to mass fraction during data reconstruction
// NFrac : Number of passive scalars for the option "FracPassive"
// FracIdx : Target variable indices for the option "FracPassive"
// JeansMinPres : Apply minimum pressure estimated from the Jeans length
// JeansMinPres_Coeff : Coefficient used by JeansMinPres = G*(Jeans_NCell*Jeans_dh)^2/(Gamma*pi);
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
GPU_DEVICE
void Hydro_RiemannPredict( const real g_ConVar_In[][ CUBE(FLU_NXT) ],
const real g_FC_B_Half[][ FLU_NXT_P1*SQR(FLU_NXT) ],
const real g_Flux_Half[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
real g_PriVar_Half[][ CUBE(FLU_NXT) ],
const real dt, const real dh,
const real MinDens, const real MinPres, const real MinEint,
const bool FracPassive, const int NFrac, const int FracIdx[],
const bool JeansMinPres, const real JeansMinPres_Coeff,
const EoS_t *EoS )
{
const int didx_flux[3] = { 1, N_HF_FLUX, SQR(N_HF_FLUX) };
const real dt_dh2 = (real)0.5*dt/dh;
const int N_HF_VAR2 = SQR(N_HF_VAR);
CGPU_LOOP( idx_out, CUBE(N_HF_VAR) )
{
const int i_out = idx_out % N_HF_VAR;
const int j_out = idx_out % N_HF_VAR2 / N_HF_VAR;
const int k_out = idx_out / N_HF_VAR2;
// for MHD, one additional flux is evaluated along each transverse direction for computing the CT electric field
# ifdef MHD
const int i_flux = i_out + 1;
const int j_flux = j_out + 1;
const int k_flux = k_out + 1;
# else
const int i_flux = i_out;
const int j_flux = j_out;
const int k_flux = k_out;
# endif
const int idx_flux = IDX321( i_flux, j_flux, k_flux, N_HF_FLUX, N_HF_FLUX );
const int i_in = i_out + 1;
const int j_in = j_out + 1;
const int k_in = k_out + 1;
const int idx_in = IDX321( i_in, j_in, k_in, FLU_NXT, FLU_NXT );
real out_con[NCOMP_TOTAL_PLUS_MAG], out_pri[NCOMP_TOTAL_PLUS_MAG], dflux[3][NCOMP_TOTAL];
# ifdef LR_EINT
real Eint;
real* const EintPtr = &Eint;
# else
real* const EintPtr = NULL;
# endif
// calculate the flux differences of the fluid variables
for (int d=0; d<3; d++)
for (int v=0; v<NCOMP_TOTAL; v++)
{
# ifdef MHD
dflux[d][v] = g_Flux_Half[d][v][idx_flux] - g_Flux_Half[d][v][ idx_flux - didx_flux[d] ];
# else
dflux[d][v] = g_Flux_Half[d][v][ idx_flux + didx_flux[d] ] - g_Flux_Half[d][v][idx_flux];
# endif
}
// update the input cell-centered conserved variables with the flux differences
for (int v=0; v<NCOMP_TOTAL; v++)
out_con[v] = g_ConVar_In[v][idx_in] - dt_dh2*( dflux[0][v] + dflux[1][v] + dflux[2][v] );
// compute the cell-centered half-step B field
# ifdef MHD
MHD_GetCellCenteredBField( out_con+MAG_OFFSET, g_FC_B_Half[0], g_FC_B_Half[1], g_FC_B_Half[2],
N_HF_VAR, N_HF_VAR, N_HF_VAR, i_out, j_out, k_out );
# endif
// apply density and internal energy floors
out_con[0] = FMAX( out_con[0], MinDens );
# ifndef BAROTROPIC_EOS
# ifdef MHD
const real Emag = (real)0.5*( SQR(out_con[MAG_OFFSET+0]) + SQR(out_con[MAG_OFFSET+1]) + SQR(out_con[MAG_OFFSET+2]) );
# else
const real Emag = NULL_REAL;
# endif
out_con[4] = Hydro_CheckMinEintInEngy( out_con[0], out_con[1], out_con[2], out_con[3], out_con[4], MinEint, Emag );
# endif // #ifndef BAROTROPIC_EOS
# if ( NCOMP_PASSIVE > 0 )
for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++)
out_con[v] = FMAX( out_con[v], TINY_NUMBER );
# endif
// conserved --> primitive variables
Hydro_Con2Pri( out_con, out_pri, MinPres, FracPassive, NFrac, FracIdx, JeansMinPres, JeansMinPres_Coeff,
EoS->DensEint2Pres_FuncPtr, EoS->DensPres2Eint_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int,
EoS->Table, EintPtr );
// store the results in g_PriVar_Half[]
for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) g_PriVar_Half[v][idx_out] = out_pri[v];
// store Eint in the last variable for LR_EINT
# ifdef LR_EINT
g_PriVar_Half[NCOMP_TOTAL_PLUS_MAG][idx_out] = Hydro_CheckMinEint( Eint, MinEint );
# endif
} // i,j,k
# ifdef __CUDACC__
__syncthreads();
# endif
} // FUNCTION : Hydro_RiemannPredict
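// Illustrative sketch (not GAMER code): the per-cell operation performed by the loop
// above, written out for a single cell. dflux[dim][comp] are the flux differences
// across the cell in the three directions and dt_dh2 = 0.5*dt/dh; the component
// count of 5 stands for the five hydro variables and is illustrative only.
static inline void HalfStepUpdate_Sketch( const double con_in[5],
                                          const double dflux[3][5],
                                          const double dt_dh2,
                                          double con_half[5] )
{
   for (int v=0; v<5; v++)
      con_half[v] = con_in[v] - dt_dh2*( dflux[0][v] + dflux[1][v] + dflux[2][v] );
   // the floors (density / internal energy) and the conserved-to-primitive
   // conversion then follow, as in the real code above
}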
#endif // #if ( FLU_SCHEME == MHM_RP )
#endif // #if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP ) )
#include "octnet/gpu/gpu.h"
#include "octnet/cpu/cpu.h"
#include <thrust/execution_policy.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/equal.h>
//-------------------------------------------------------------------------------
// helper functions
//-------------------------------------------------------------------------------
extern "C"
octree* octree_new_gpu() {
octree* grid = new octree;
grid->n = 0;
grid->grid_depth = 0;
grid->grid_height = 0;
grid->grid_width = 0;
grid->feature_size = 0;
grid->n_leafs = 0;
grid->trees = 0;
grid->prefix_leafs = 0;
grid->data = 0;
grid->grid_capacity = 0;
grid->data_capacity = 0;
return grid;
}
extern "C"
void octree_free_gpu(octree* grid_d) {
device_free(grid_d->trees);
device_free(grid_d->prefix_leafs);
device_free(grid_d->data);
delete grid_d;
}
void octree_resize_gpu(int n, int grid_depth, int grid_height, int grid_width, int feature_size, int n_leafs, octree* dst) {
if(DEBUG) { printf("[DEBUG] octree_resize_gpu\n"); }
dst->n = n;
dst->grid_depth = grid_depth;
dst->grid_height = grid_height;
dst->grid_width = grid_width;
dst->feature_size = feature_size;
dst->n_leafs = n_leafs;
int grid_capacity = octree_num_blocks(dst);
if(dst->grid_capacity < grid_capacity) {
dst->grid_capacity = grid_capacity;
if(dst->trees != 0) {
device_free(dst->trees);
}
dst->trees = device_malloc<ot_tree_t>(grid_capacity * N_TREE_INTS);
if(dst->prefix_leafs != 0) {
device_free(dst->prefix_leafs);
}
dst->prefix_leafs = device_malloc<ot_size_t>(grid_capacity);
}
int data_capacity = n_leafs * feature_size;
if(dst->data_capacity < data_capacity) {
dst->data_capacity = data_capacity;
if(dst->data != 0) {
device_free(dst->data);
}
dst->data = device_malloc<ot_data_t>(data_capacity);
}
}
void octree_resize_as_gpu(const octree* src, octree* dst) {
octree_resize_gpu(src->n, src->grid_depth, src->grid_height, src->grid_width, src->feature_size, src->n_leafs, dst);
}
__global__ void kernel_octree_clr_trees(ot_tree_t* trees, const int n_tree_ints) {
CUDA_KERNEL_LOOP(idx, n_tree_ints) {
trees[idx] = 0;
}
}
extern "C"
void octree_clr_trees_gpu(octree* grid_d) {
// cudaMemset(grid_d->trees, 0, octree_num_blocks(grid_d) * N_TREE_INTS * sizeof(ot_tree_t));
int n_tree_ints = octree_num_blocks(grid_d) * N_TREE_INTS;
kernel_octree_clr_trees<<<GET_BLOCKS(n_tree_ints), CUDA_NUM_THREADS>>>(
grid_d->trees, n_tree_ints
);
CUDA_POST_KERNEL_CHECK;
}
extern "C"
void octree_fill_data_gpu(octree* grid_d, ot_data_t fill_value) {
int n = grid_d->feature_size * grid_d->n_leafs;
thrust::fill_n(thrust::device, grid_d->data, n, fill_value);
}
// template <int grid_idx_offset>
template <typename OUT_TYPE>
struct thrust_tree_num_leafs : public thrust::unary_function<int, OUT_TYPE> {
const octree grid;
const OUT_TYPE mul;
// const int n_blocks;
thrust_tree_num_leafs(const octree grid_, const OUT_TYPE mul_) :
grid(grid_), mul(mul_) {
// thrust_tree_num_leafs(const octree grid_, const int n_block_) :
// grid(grid_), n_blocks(n_block_) {
}
__host__ __device__ OUT_TYPE operator()(const int grid_idx) {
// printf(" ... grid_idx %d - %ld\n", grid_idx, mul * tree_n_leafs( octree_get_tree(&grid, grid_idx) ));
return mul * tree_n_leafs( octree_get_tree(&grid, grid_idx) );
}
};
extern "C"
void octree_upd_n_leafs_gpu(octree* grid_d) {
int n_blocks = octree_num_blocks(grid_d);
thrust::counting_iterator<int> grid_idx_iter(0);
grid_d->n_leafs = thrust::transform_reduce(
thrust::device,
grid_idx_iter, grid_idx_iter + n_blocks,
thrust_tree_num_leafs<ot_size_t>(*grid_d, 1), 0, thrust::plus<ot_size_t>()
);
}
extern "C"
void octree_upd_prefix_leafs_gpu(octree* grid_d) {
int n_blocks = octree_num_blocks(grid_d);
thrust::counting_iterator<int> grid_idx_iter(0);
thrust::transform_exclusive_scan(thrust::device,
grid_idx_iter, grid_idx_iter + n_blocks,
grid_d->prefix_leafs,
thrust_tree_num_leafs<ot_size_t>(*grid_d, 1),
0, thrust::plus<ot_size_t>());
}
void octree_cpy_trees_cpu_gpu(const octree* src_h, octree* dst_d) {
if(DEBUG) { printf("[DEBUG] octree_cpy_trees_cpu_gpu\n"); }
host_to_device(src_h->trees, dst_d->trees, octree_num_blocks(src_h) * N_TREE_INTS);
}
void octree_cpy_prefix_leafs_cpu_gpu(const octree* src_h, octree* dst_d) {
if(DEBUG) { printf("[DEBUG] octree_cpy_prefix_leafs_cpu_gpu\n"); }
host_to_device(src_h->prefix_leafs, dst_d->prefix_leafs, octree_num_blocks(src_h));
}
void octree_cpy_data_cpu_gpu(const octree* src_h, octree* dst_d) {
if(DEBUG) { printf("[DEBUG] octree_cpy_data_cpu_gpu\n"); }
host_to_device(src_h->data, dst_d->data, src_h->n_leafs * src_h->feature_size);
}
void octree_cpy_trees_gpu_cpu(const octree* src_d, octree* dst_h) {
if(DEBUG) { printf("[DEBUG] octree_cpy_trees_gpu_cpu\n"); }
device_to_host(src_d->trees, dst_h->trees, octree_num_blocks(src_d) * N_TREE_INTS);
}
void octree_cpy_prefix_leafs_gpu_cpu(const octree* src_d, octree* dst_h) {
if(DEBUG) { printf("[DEBUG] octree_cpy_prefix_leafs_gpu_cpu\n"); }
device_to_host(src_d->prefix_leafs, dst_h->prefix_leafs, octree_num_blocks(src_d));
}
void octree_cpy_data_gpu_cpu(const octree* src_d, octree* dst_h) {
if(DEBUG) { printf("[DEBUG] octree_cpy_data_gpu_cpu\n"); }
device_to_host(src_d->data, dst_h->data, src_d->n_leafs * src_d->feature_size);
}
void octree_cpy_trees_gpu_gpu(const octree* src_d, octree* dst_d) {
if(DEBUG) { printf("[DEBUG] octree_cpy_trees_gpu_gpu\n"); }
device_to_device(src_d->trees, dst_d->trees, octree_num_blocks(src_d) * N_TREE_INTS);
}
void octree_cpy_prefix_leafs_gpu_gpu(const octree* src_d, octree* dst_d) {
if(DEBUG) { printf("[DEBUG] octree_cpy_prefix_leafs_gpu_gpu\n"); }
device_to_device(src_d->prefix_leafs, dst_d->prefix_leafs, octree_num_blocks(src_d));
}
void octree_cpy_data_gpu_gpu(const octree* src_d, octree* dst_d) {
if(DEBUG) { printf("[DEBUG] octree_cpy_data_gpu_gpu\n"); }
device_to_device(src_d->data, dst_d->data, src_d->n_leafs * src_d->feature_size);
}
__global__ void kernel_cpy_sup_to_sub(octree sub, int n_leafs, const octree sup) {
CUDA_KERNEL_LOOP(sub_leaf_idx, n_leafs) {
int grid_idx = sub.data[sub_leaf_idx * sub.feature_size];
int data_idx = sub_leaf_idx - sub.prefix_leafs[grid_idx];
int sub_bit_idx = data_idx_to_bit_idx(octree_get_tree(&sub, grid_idx), data_idx);
const ot_tree_t* sup_tree = octree_get_tree(&sup, grid_idx);
int sup_bit_idx = tree_bit_idx_leaf(sup_tree, sub_bit_idx);
int sup_data_idx = tree_data_idx(sup_tree, sup_bit_idx, sup.feature_size);
octree_cpy_leaf(octree_get_data(&sup, grid_idx) + sup_data_idx, sup.feature_size, sub.data + sub_leaf_idx * sub.feature_size);
}
}
extern "C"
void octree_cpy_sup_to_sub_gpu(const octree* sup, octree* sub) {
octree_leaf_idx_to_grid_idx_gpu(sub, sub->feature_size, sub->data_capacity, sub->data);
kernel_cpy_sup_to_sub<<<GET_BLOCKS(sub->n_leafs), CUDA_NUM_THREADS>>>(
*sub, sub->n_leafs, *sup
);
CUDA_POST_KERNEL_CHECK;
}
__global__ void kernel_cpy_sub_to_sup(octree sup, int n_leafs, const octree sub) {
CUDA_KERNEL_LOOP(sub_leaf_idx, n_leafs) {
int grid_idx = leaf_idx_to_grid_idx(&sub, sub_leaf_idx);
int data_idx = sub_leaf_idx - sub.prefix_leafs[grid_idx];
int sub_bit_idx = data_idx_to_bit_idx(octree_get_tree(&sub, grid_idx), data_idx);
const ot_tree_t* sup_tree = octree_get_tree(&sup, grid_idx);
int sup_bit_idx = tree_bit_idx_leaf(sup_tree, sub_bit_idx);
int sup_data_idx = tree_data_idx(sup_tree, sup_bit_idx, sup.feature_size);
ot_data_t* sup_data = octree_get_data(&sup, grid_idx);
for(int f = 0; f < sup.feature_size; ++f) {
atomicAdd(sup_data + (sup_data_idx + f), sub.data[sub_leaf_idx * sub.feature_size + f]);
}
}
}
extern "C"
void octree_cpy_sub_to_sup_sum_gpu(const octree* sub, octree* sup) {
octree_fill_data_gpu(sup, 0);
kernel_cpy_sub_to_sup<<<GET_BLOCKS(sub->n_leafs), CUDA_NUM_THREADS>>>(
*sup, sub->n_leafs, *sub
);
CUDA_POST_KERNEL_CHECK;
}
extern "C"
void octree_copy_gpu(const octree* src, octree* dst) {
octree_resize_as_gpu(src, dst);
octree_cpy_trees_gpu_gpu(src, dst);
octree_cpy_prefix_leafs_gpu_gpu(src, dst);
octree_cpy_data_gpu_gpu(src, dst);
}
void octree_to_gpu(const octree* grid_h, octree* grid_d) {
int n_blocks = octree_num_blocks(grid_h);
octree_resize_as_gpu(grid_h, grid_d);
if(n_blocks > 0) {
octree_cpy_trees_cpu_gpu(grid_h, grid_d);
octree_cpy_prefix_leafs_cpu_gpu(grid_h, grid_d);
octree_cpy_data_cpu_gpu(grid_h, grid_d);
}
}
void octree_to_cpu(const octree* grid_d, octree* grid_h) {
int n_blocks = octree_num_blocks(grid_d);
octree_resize_as_cpu(grid_d, grid_h);
if(n_blocks > 0) {
octree_cpy_trees_gpu_cpu(grid_d, grid_h);
octree_cpy_prefix_leafs_gpu_cpu(grid_d, grid_h);
octree_cpy_data_gpu_cpu(grid_d, grid_h);
}
}
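// Illustrative sketch (not part of the original library): moving an octree built on the
// host to the GPU and back using the helpers defined above. "grid_h" is assumed to be a
// fully populated host octree; octree_new_cpu()/octree_free_cpu() are assumed to be the
// CPU-side counterparts declared in octnet/cpu/cpu.h.
static void octree_roundtrip_sketch(const octree* grid_h) {
  octree* grid_d = octree_new_gpu();     // all fields zero-initialized
  octree_to_gpu(grid_h, grid_d);         // resize + copy trees/prefix_leafs/data

  octree* grid_h2 = octree_new_cpu();    // assumed CPU-side allocator
  octree_to_cpu(grid_d, grid_h2);        // copy everything back to the host

  octree_free_gpu(grid_d);
  octree_free_cpu(grid_h2);              // assumed CPU-side deallocator
}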
template <typename T>
__global__ void kernel_octree_leaf_idx_to_grid_idx(T* inds, int n_blocks, const octree in, const int stride, const int inds_length) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
const ot_tree_t* tree = octree_get_tree(&in, grid_idx);
// int cum_n_leafs = n_leafs_upto(&in, grid_idx);
int cum_n_leafs = in.prefix_leafs[grid_idx];
int n_leafs = tree_n_leafs(tree);
for(int leaf_idx = 0; leaf_idx < n_leafs; ++leaf_idx) {
int inds_idx = cum_n_leafs * stride + leaf_idx * stride;
// if(leaf_idx >= inds_length) printf("[ERROR] in kernel_octree_leaf_idx_to_grid_idx, %d, %d\n", leaf_idx, inds_length);
inds[inds_idx] = grid_idx;
}
}
}
template <typename T>
void octree_leaf_idx_to_grid_idx_gpu(const octree* in, const int stride, const int inds_length, T* inds) {
if(DEBUG > 1) { printf("[DEBUG] octree_leaf_idx_to_grid_idx_gpu stride=%d, n_blocks=%d\n", stride, octree_num_blocks(in)); }
const int n_blocks = octree_num_blocks(in);
kernel_octree_leaf_idx_to_grid_idx<<<GET_BLOCKS_T(n_blocks, 1024), 1024>>>(inds, n_blocks, *in, stride, inds_length);
CUDA_POST_KERNEL_CHECK;
}
template void octree_leaf_idx_to_grid_idx_gpu<ot_data_t>(const octree* in, const int stride, const int inds_length, ot_data_t* inds);
template void octree_leaf_idx_to_grid_idx_gpu<ot_size_t>(const octree* in, const int stride, const int inds_length, ot_size_t* inds);
bool octree_equal_trees_gpu(const octree* in1, const octree* in2) {
if(DEBUG) { printf("[DEBUG] octree_equal_trees_gpu\n"); }
if(in1->n_leafs != in2->n_leafs) {
return false;
}
int n_blocks1 = octree_num_blocks(in1);
int n_blocks2 = octree_num_blocks(in2);
if(n_blocks1 != n_blocks2) {
return false;
}
// element-wise comparison of the two tree arrays on the device
bool eq = thrust::equal(thrust::device, in1->trees, in1->trees + (N_TREE_INTS * n_blocks1), in2->trees);
return eq;
}
bool octree_equal_prefix_leafs_gpu(const octree* in1, const octree* in2) {
int n_blocks1 = octree_num_blocks(in1);
int n_blocks2 = octree_num_blocks(in2);
if(n_blocks1 != n_blocks2) {
return false;
}
return thrust::equal(thrust::device, in1->prefix_leafs, in1->prefix_leafs + n_blocks1, in2->prefix_leafs);
}
bool octree_equal_data_gpu(const octree* in1, const octree* in2) {
if(in1->feature_size * in1->n_leafs != in2->feature_size * in2->n_leafs) {
return false;
}
return thrust::equal(thrust::device, in1->data, in1->data + (in1->feature_size * in1->n_leafs), in2->data);
}
bool octree_equal_gpu(const octree* in1, const octree* in2) {
if(!octree_equal_shape(in1, in2)) { return false; }
if(in1->n_leafs != in2->n_leafs) { return false; }
if(!octree_equal_trees_gpu(in1, in2)) {
return false;
}
if(!octree_equal_prefix_leafs_gpu(in1, in2)) {
return false;
}
if(!octree_equal_data_gpu(in1, in2)) {
return false;
}
return true;
}
namespace xgboost {
namespace tree {
// With constraints
template <typename GradientPairT>
XGBOOST_DEVICE float LossChangeMissing(const GradientPairT& scan,
const GradientPairT& missing,
const GradientPairT& parent_sum,
const GPUTrainingParam& param,
int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float parent_gain = CalcGain(param, parent_sum);
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
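// Illustrative sketch (not XGBoost code): the decision made by LossChangeMissing()
// above, written out with the plain unconstrained second-order gain G^2/(H + lambda).
// The real code delegates to CalcGain()/CalcSplitGain(), which additionally handle
// monotonic constraints and the remaining regularization terms in GPUTrainingParam;
// SimpleStats and lambda below are illustrative stand-ins.
struct SimpleStats { double G; double H; };
static double LossChangeMissingSketch(SimpleStats scan, SimpleStats missing,
                                      SimpleStats parent, double lambda,
                                      bool& missing_left_out) {
  auto gain  = [&](SimpleStats s) { return s.G * s.G / (s.H + lambda); };
  auto plus  = [](SimpleStats a, SimpleStats b) { return SimpleStats{a.G + b.G, a.H + b.H}; };
  auto minus = [](SimpleStats a, SimpleStats b) { return SimpleStats{a.G - b.G, a.H - b.H}; };

  const double parent_gain = gain(parent);
  // option 1: rows with missing values go to the left child
  const double left_gain  = gain(plus(scan, missing)) + gain(minus(parent, plus(scan, missing)));
  // option 2: rows with missing values go to the right child
  const double right_gain = gain(scan) + gain(minus(parent, scan));

  missing_left_out = (left_gain >= right_gain);
  return (missing_left_out ? left_gain : right_gain) - parent_gain;
}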
/*!
* \brief Sum all histogram bins of a single feature within one thread block.
*
* \tparam ReduceT BlockReduce type.
* \tparam TempStorageT cub shared-memory temporary storage type.
*
* \param feature_histogram Histogram bins of the current feature.
* \param temp_storage Shared memory for the intermediate reduction result.
*/
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT,
typename GradientSumT>
__device__ GradientSumT
ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
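// Illustrative sketch (not XGBoost code): the strided-load + block-reduce pattern used
// by ReduceFeature() above, reduced to a standalone kernel that sums a float array with
// a single thread block. Kernel and buffer names are illustrative.
#include <cub/cub.cuh>
template <int BLOCK_THREADS>
__global__ void BlockSumSketch(const float* in, int n, float* out) {
  using BlockReduceT = cub::BlockReduce<float, BLOCK_THREADS>;
  __shared__ typename BlockReduceT::TempStorage temp_storage;

  // every thread accumulates a strided subset of the input
  float local_sum = 0.f;
  for (int i = threadIdx.x; i < n; i += BLOCK_THREADS) local_sum += in[i];

  // block-wide reduction; the result is only valid in thread 0
  float block_sum = BlockReduceT(temp_storage).Sum(local_sum);
  if (threadIdx.x == 0) *out = block_sum;
}
// e.g. BlockSumSketch<256><<<1, 256>>>(d_in, n, d_out);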
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx, EvaluateSplitInputs<GradientSumT> inputs,
DeviceSplitCandidate* best_split, // shared memory storing best split
TempStorageT* temp_storage // temp memory for cub operations
) {
// Use pointer from cut to indicate begin and end of bins for each feature.
uint32_t gidx_begin = inputs.feature_segments[fidx]; // beginning bin
uint32_t gidx_end =
inputs.feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum =
ReduceFeature<BLOCK_THREADS, ReduceT, TempStorageT, GradientSumT>(
inputs.gradient_histogram.subspan(gidx_begin, gidx_end - gidx_begin),
temp_storage);
GradientSumT const missing = inputs.parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op = SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin = thread_active
? inputs.gradient_histogram[scan_begin + threadIdx.x]
: GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, inputs.parent_sum, inputs.param,
inputs.monotonic_constraints[fidx],
inputs.value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
cub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
cub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax());
__shared__ cub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = inputs.min_fvalue[fidx];
} else {
fvalue = inputs.feature_values[split_gidx];
}
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = inputs.parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue,
fidx, GradientPair(left), GradientPair(right),
inputs.param);
}
__syncthreads();
}
}
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitsKernel(
EvaluateSplitInputs<GradientSumT> left,
EvaluateSplitInputs<GradientSumT> right,
common::Span<DeviceSplitCandidate> out_candidates) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = cub::KeyValuePair<int, float>;
using BlockScanT =
cub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = cub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = cub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// If this block is working on the left or right node
bool is_left = blockIdx.x < left.feature_set.size();
EvaluateSplitInputs<GradientSumT>& inputs = is_left ? left : right;
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = inputs.feature_set[is_left ? blockIdx.x
: blockIdx.x - left.feature_set.size()];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, inputs, &best_split, &temp_storage);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
out_candidates[blockIdx.x] = best_split;
}
}
__device__ DeviceSplitCandidate operator+(const DeviceSplitCandidate& a,
const DeviceSplitCandidate& b) {
return b.loss_chg > a.loss_chg ? b : a;
}
template <typename GradientSumT>
void EvaluateSplits(common::Span<DeviceSplitCandidate> out_splits,
EvaluateSplitInputs<GradientSumT> left,
EvaluateSplitInputs<GradientSumT> right) {
size_t combined_num_features =
left.feature_set.size() + right.feature_set.size();
dh::TemporaryArray<DeviceSplitCandidate> feature_best_splits(
combined_num_features);
// One block for each feature
uint32_t constexpr kBlockThreads = 256;
dh::LaunchKernel {uint32_t(combined_num_features), kBlockThreads, 0}(
EvaluateSplitsKernel<kBlockThreads, GradientSumT>, left, right,
dh::ToSpan(feature_best_splits));
// Reduce to get best candidate for left and right child over all features
auto reduce_offset =
dh::MakeTransformIterator<size_t>(thrust::make_counting_iterator(0llu),
[=] __device__(size_t idx) -> size_t {
if (idx == 0) {
return 0;
}
if (idx == 1) {
return left.feature_set.size();
}
if (idx == 2) {
return combined_num_features;
}
return 0;
});
size_t temp_storage_bytes = 0;
auto num_segments = out_splits.size();
cub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes,
feature_best_splits.data(), out_splits.data(),
num_segments, reduce_offset, reduce_offset + 1);
dh::TemporaryArray<int8_t> temp(temp_storage_bytes);
cub::DeviceSegmentedReduce::Sum(temp.data().get(), temp_storage_bytes,
feature_best_splits.data(), out_splits.data(),
num_segments, reduce_offset, reduce_offset + 1);
}
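// Illustrative sketch (not XGBoost code): the two-pass cub pattern used by
// EvaluateSplits() above -- first call with a null buffer to query the required
// temporary storage, then allocate and call again -- shown here for a plain segmented
// sum over ints. All data and names are illustrative.
#include <cub/cub.cuh>
#include <thrust/device_vector.h>
#include <vector>
static void SegmentedSumSketch() {
  std::vector<int> h_in  = {1, 2, 3, 4, 5, 6};   // two segments: [1,2,3] and [4,5,6]
  std::vector<int> h_off = {0, 3, 6};            // begin/end offsets of the segments
  thrust::device_vector<int> in(h_in.begin(), h_in.end());
  thrust::device_vector<int> offsets(h_off.begin(), h_off.end());
  thrust::device_vector<int> out(2);

  size_t temp_bytes = 0;
  cub::DeviceSegmentedReduce::Sum(nullptr, temp_bytes,
                                  in.data().get(), out.data().get(), /*num_segments=*/2,
                                  offsets.data().get(), offsets.data().get() + 1);
  thrust::device_vector<char> temp(temp_bytes);
  cub::DeviceSegmentedReduce::Sum(temp.data().get(), temp_bytes,
                                  in.data().get(), out.data().get(), 2,
                                  offsets.data().get(), offsets.data().get() + 1);
  // out now holds { 6, 15 }
}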
template <typename GradientSumT>
void EvaluateSingleSplit(common::Span<DeviceSplitCandidate> out_split,
EvaluateSplitInputs<GradientSumT> input) {
EvaluateSplits(out_split, input, {});
}
template void EvaluateSplits<GradientPair>(
common::Span<DeviceSplitCandidate> out_splits,
EvaluateSplitInputs<GradientPair> left,
EvaluateSplitInputs<GradientPair> right);
template void EvaluateSplits<GradientPairPrecise>(
common::Span<DeviceSplitCandidate> out_splits,
EvaluateSplitInputs<GradientPairPrecise> left,
EvaluateSplitInputs<GradientPairPrecise> right);
template void EvaluateSingleSplit<GradientPair>(
common::Span<DeviceSplitCandidate> out_split,
EvaluateSplitInputs<GradientPair> input);
template void EvaluateSingleSplit<GradientPairPrecise>(
common::Span<DeviceSplitCandidate> out_split,
EvaluateSplitInputs<GradientPairPrecise> input);
} // namespace tree
} // namespace xgboost
#include <iostream>
#include "viennacl.hpp"
#include "viennacl_private.hpp"
//include basic scalar and vector types of ViennaCL
#include "viennacl/scalar.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/linalg/direct_solve.hpp"
#include "viennacl/linalg/prod.hpp"
// xGEMV
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostSgemv(ViennaCLBackend /*backend*/,
ViennaCLOrder order, ViennaCLTranspose transA,
ViennaCLInt m, ViennaCLInt n, float alpha, float *A, ViennaCLInt offA_row, ViennaCLInt offA_col, ViennaCLInt incA_row, ViennaCLInt incA_col, ViennaCLInt lda,
float *x, ViennaCLInt offx, ViennaCLInt incx,
float beta,
float *y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<float> v2(y, viennacl::MAIN_MEMORY, size_type(m), size_type(offy), difference_type(incy));
viennacl::matrix_base<float> mat(A, viennacl::MAIN_MEMORY,
size_type(m), size_type(offA_row), difference_type(incA_row), size_type(m),
size_type(n), size_type(offA_col), difference_type(incA_col), size_type(lda), order == ViennaCLRowMajor);
v2 *= beta;
if (transA == ViennaCLTrans)
v2 += alpha * viennacl::linalg::prod(viennacl::trans(mat), v1);
else
v2 += alpha * viennacl::linalg::prod(mat, v1);
return ViennaCLSuccess;
}
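// Illustrative sketch (not part of ViennaCL): calling the host GEMV wrapper defined
// above for a small row-major 2x3 matrix, computing y <- alpha*A*x + beta*y. The enum
// values ViennaCLRowMajor / ViennaCLNoTrans are assumed to come from the public
// libviennacl headers included at the top of this file; the data are illustrative.
static void host_sgemv_sketch()
{
  float A[6] = { 1, 2, 3,
                 4, 5, 6 };          // 2x3, row-major, lda = 3
  float x[3] = { 1, 1, 1 };
  float y[2] = { 0, 0 };

  ViennaCLHostSgemv(NULL, ViennaCLRowMajor, ViennaCLNoTrans,
                    /*m=*/2, /*n=*/3, /*alpha=*/1.0f,
                    A, /*offA_row=*/0, /*offA_col=*/0, /*incA_row=*/1, /*incA_col=*/1, /*lda=*/3,
                    x, /*offx=*/0, /*incx=*/1,
                    /*beta=*/0.0f,
                    y, /*offy=*/0, /*incy=*/1);
  // y now holds { 6, 15 }
}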
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDgemv(ViennaCLBackend /*backend*/,
ViennaCLOrder order, ViennaCLTranspose transA,
ViennaCLInt m, ViennaCLInt n, double alpha, double *A, ViennaCLInt offA_row, ViennaCLInt offA_col, ViennaCLInt incA_row, ViennaCLInt incA_col, ViennaCLInt lda,
double *x, ViennaCLInt offx, ViennaCLInt incx,
double beta,
double *y, ViennaCLInt offy, ViennaCLInt incy)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<double> v2(y, viennacl::MAIN_MEMORY, size_type(m), size_type(offy), difference_type(incy));
viennacl::matrix_base<double> mat(A, viennacl::MAIN_MEMORY,
size_type(m), size_type(offA_row), difference_type(incA_row), size_type(m),
size_type(n), size_type(offA_col), difference_type(incA_col), size_type(lda), order == ViennaCLRowMajor);
v2 *= beta;
if (transA == ViennaCLTrans)
v2 += alpha * viennacl::linalg::prod(viennacl::trans(mat), v1);
else
v2 += alpha * viennacl::linalg::prod(mat, v1);
return ViennaCLSuccess;
}
// xTRSV
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostStrsv(ViennaCLBackend /*backend*/,
ViennaCLUplo uplo, ViennaCLOrder order, ViennaCLTranspose transA, ViennaCLDiag diag,
ViennaCLInt n, float *A, ViennaCLInt offA_row, ViennaCLInt offA_col, ViennaCLInt incA_row, ViennaCLInt incA_col, ViennaCLInt lda,
float *x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::matrix_base<float> mat(A, viennacl::MAIN_MEMORY,
size_type(n), size_type(offA_row), difference_type(incA_row), size_type(n),
size_type(n), size_type(offA_col), difference_type(incA_col), size_type(lda), order == ViennaCLRowMajor);
if (transA == ViennaCLTrans)
{
if (uplo == ViennaCLUpper)
if (diag == ViennaCLUnit)
viennacl::linalg::inplace_solve(viennacl::trans(mat), v, viennacl::linalg::unit_upper_tag());
else
viennacl::linalg::inplace_solve(viennacl::trans(mat), v, viennacl::linalg::upper_tag());
else
if (diag == ViennaCLUnit)
viennacl::linalg::inplace_solve(viennacl::trans(mat), v, viennacl::linalg::unit_lower_tag());
else
viennacl::linalg::inplace_solve(viennacl::trans(mat), v, viennacl::linalg::lower_tag());
}
else
{
if (uplo == ViennaCLUpper)
if (diag == ViennaCLUnit)
viennacl::linalg::inplace_solve(mat, v, viennacl::linalg::unit_upper_tag());
else
viennacl::linalg::inplace_solve(mat, v, viennacl::linalg::upper_tag());
else
if (diag == ViennaCLUnit)
viennacl::linalg::inplace_solve(mat, v, viennacl::linalg::unit_lower_tag());
else
viennacl::linalg::inplace_solve(mat, v, viennacl::linalg::lower_tag());
}
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDtrsv(ViennaCLBackend /*backend*/,
ViennaCLUplo uplo, ViennaCLOrder order, ViennaCLTranspose transA, ViennaCLDiag diag,
ViennaCLInt n, double *A, ViennaCLInt offA_row, ViennaCLInt offA_col, ViennaCLInt incA_row, ViennaCLInt incA_col, ViennaCLInt lda,
double *x, ViennaCLInt offx, ViennaCLInt incx)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::matrix_base<double> mat(A, viennacl::MAIN_MEMORY,
size_type(n), size_type(offA_row), difference_type(incA_row), size_type(n),
size_type(n), size_type(offA_col), difference_type(incA_col), size_type(lda), order == ViennaCLRowMajor);
if (transA == ViennaCLTrans)
{
if (uplo == ViennaCLUpper)
if (diag == ViennaCLUnit)
viennacl::linalg::inplace_solve(viennacl::trans(mat), v, viennacl::linalg::unit_upper_tag());
else
viennacl::linalg::inplace_solve(viennacl::trans(mat), v, viennacl::linalg::upper_tag());
else
if (diag == ViennaCLUnit)
viennacl::linalg::inplace_solve(viennacl::trans(mat), v, viennacl::linalg::unit_lower_tag());
else
viennacl::linalg::inplace_solve(viennacl::trans(mat), v, viennacl::linalg::lower_tag());
}
else
{
if (uplo == ViennaCLUpper)
if (diag == ViennaCLUnit)
viennacl::linalg::inplace_solve(mat, v, viennacl::linalg::unit_upper_tag());
else
viennacl::linalg::inplace_solve(mat, v, viennacl::linalg::upper_tag());
else
if (diag == ViennaCLUnit)
viennacl::linalg::inplace_solve(mat, v, viennacl::linalg::unit_lower_tag());
else
viennacl::linalg::inplace_solve(mat, v, viennacl::linalg::lower_tag());
}
return ViennaCLSuccess;
}
// xGER
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostSger(ViennaCLBackend /*backend*/,
ViennaCLOrder order,
ViennaCLInt m, ViennaCLInt n,
float alpha,
float *x, ViennaCLInt offx, ViennaCLInt incx,
float *y, ViennaCLInt offy, ViennaCLInt incy,
float *A, ViennaCLInt offA_row, ViennaCLInt offA_col, ViennaCLInt incA_row, ViennaCLInt incA_col, ViennaCLInt lda)
{
typedef viennacl::vector_base<float>::size_type size_type;
typedef viennacl::vector_base<float>::size_type difference_type;
viennacl::vector_base<float> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<float> v2(y, viennacl::MAIN_MEMORY, size_type(m), size_type(offy), difference_type(incy));
viennacl::matrix_base<float> mat(A, viennacl::MAIN_MEMORY,
size_type(m), size_type(offA_row), difference_type(incA_row), size_type(m),
size_type(n), size_type(offA_col), difference_type(incA_col), size_type(lda), order == ViennaCLRowMajor);
mat += alpha * viennacl::linalg::outer_prod(v1, v2);
return ViennaCLSuccess;
}
VIENNACL_EXPORTED_FUNCTION ViennaCLStatus ViennaCLHostDger(ViennaCLBackend /*backend*/,
ViennaCLOrder order,
ViennaCLInt m, ViennaCLInt n,
double alpha,
double *x, ViennaCLInt offx, ViennaCLInt incx,
double *y, ViennaCLInt offy, ViennaCLInt incy,
double *A, ViennaCLInt offA_row, ViennaCLInt offA_col, ViennaCLInt incA_row, ViennaCLInt incA_col, ViennaCLInt lda)
{
typedef viennacl::vector_base<double>::size_type size_type;
typedef viennacl::vector_base<double>::size_type difference_type;
viennacl::vector_base<double> v1(x, viennacl::MAIN_MEMORY, size_type(n), size_type(offx), difference_type(incx));
viennacl::vector_base<double> v2(y, viennacl::MAIN_MEMORY, size_type(m), size_type(offy), difference_type(incy));
viennacl::matrix_base<double> mat(A, viennacl::MAIN_MEMORY,
size_type(m), size_type(offA_row), difference_type(incA_row), size_type(m),
size_type(n), size_type(offA_col), difference_type(incA_col), size_type(lda), order == ViennaCLRowMajor);
mat += alpha * viennacl::linalg::outer_prod(v1, v2);
return ViennaCLSuccess;
}
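// A minimal usage sketch (not part of the library): rank-1 update of a 2x2
// row-major host matrix via ViennaCLHostSger above (A += alpha * outer_prod(x, y)).
// NULL is passed for the unused backend argument.
static void example_host_sger()
{
  float A[4] = { 1.0f, 0.0f,
                 0.0f, 1.0f };
  float x[2] = { 1.0f, 2.0f };
  float y[2] = { 3.0f, 4.0f };
  ViennaCLHostSger(NULL, ViennaCLRowMajor,
                   2, 2, 2.0f,
                   x, 0, 1,
                   y, 0, 1,
                   A, 0, 0, 1, 1, 2);
  // with the outer_prod(v1, v2) call above, A becomes { 7, 8, 12, 17 }
}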
// ========================== next source file ==========================
#include "utils/utils.cuh"
#include "utils/intrinsics.cuh"
#include "kernel_libs/kernel_fusion.cuh"
#include "data_structures/graph.cuh"
#include "data_structures/active_set.cuh"
#include "data_structures/functor.cuh"
#include "abstraction/config.cuh"
#include <moderngpu/kernel_sortedsearch.hxx>
#include <moderngpu/kernel_scan.hxx>
const int SCRATH=256; // process at most SCRATH vertices in each epoch
struct smem_t{
int eid_start; // process eid idx in active_edges
int eid_size;
int vidx_start; // process vertex idx in active_edges
int vidx_size;
int processed;
int vidx_cur_start;
int vidx_cur_size;
int chunk_end;
int v[SCRATH];
int v_start_pos[SCRATH]; // from vidx_start to min(256,vidx_end)
int v_degree_scan[SCRATH]; // from vidx_start to min(256,vidx_end)
};
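// Note on vertex lookup (illustration only, not part of the framework): the kernels
// below assume __upper_bound(arr, n, key) from the included utility headers returns
// the index of the first element of the sorted array `arr` that is strictly greater
// than `key`, so that __upper_bound(v_degree_scan, vidx_cur_size, eid) - 1 gives the
// vertex owning edge id `eid`. A host-side sketch under that assumption:
static int upper_bound_sketch(const int* arr, int n, int key){
  int lo = 0, hi = n;                 // search the half-open range [lo, hi)
  while(lo < hi){
    int mid = (lo + hi) >> 1;
    if(arr[mid] <= key) lo = mid + 1; // arr[mid] is not greater than key: go right
    else hi = mid;                    // arr[mid] > key: candidate answer
  }
  return lo;                          // first index with arr[idx] > key (or n)
}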
template<ASFmt fmt, QueueMode M, typename G>
__global__ void
__prepare(active_set_t as, G g, config_t conf){
const int STRIDE = blockDim.x*gridDim.x;
const int gtid = threadIdx.x + blockIdx.x*blockDim.x;
//const int assize = ASProxy<fmt,M>::get_size_hard(as);
const int assize = ASProxy<fmt,M>::get_size(as);
Status want = conf.want();
int v,num;
for(int idx=gtid; idx<assize; idx+=STRIDE){
v = ASProxy<fmt,M>::fetch(as, idx, want);
if(v>=0){
if(conf.conf_dir == Push) num = tex1Dfetch<int>(g.dt_odegree, v);
else num = g.get_in_degree(v);
}else num = 0;
as.workset.dg_degree[idx] = num+1;
}
// block reduce
//tmp = blockReduceSum(tmp);
//if(!threadIdx.x) atomicAdd(as.workset.dg_size, tmp);
}
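// Note on the +1 above: dg_degree[idx] stores (degree + 1), so the exclusive scan
// that fills dg_udegree yields (prefix sum of degrees) + idx. The expand kernels
// undo this by subtracting the index again (e.g. __ldg(dg_udegree+idx) - idx).
// The extra +1 per entry keeps the scanned sequence strictly increasing, which is
// presumably what lets the sorted_search in ExpandProxy map every active-set slot
// (including zero-degree or invalid vertices) to a well-defined block range.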
template<ASFmt fmt, QueueMode M, typename G, typename F>
__global__ void
__expand_VC_STRICT_fused(active_set_t as, G g, F f, config_t conf){
const int* __restrict__ strict_adj_list = g.dg_adj_list;
const int gtid = threadIdx.x + blockIdx.x*blockDim.x;
const int assize = ASProxy<fmt,M>::get_size(as);
if(assize==0){if(gtid==0) as.halt_device();return;}
const int tid = threadIdx.x;
Status want = conf.want();
__shared__ smem_t smem;
if(threadIdx.x==0){
smem.vidx_start = __ldg(as.workset.dg_idx+blockIdx.x);
smem.eid_start = __ldg(as.workset.dg_seid_per_blk+blockIdx.x) - smem.vidx_start;
int vidx_end = __ldg(as.workset.dg_idx+blockIdx.x+1);
int eid_end = __ldg(as.workset.dg_seid_per_blk+blockIdx.x+1) - vidx_end;
smem.vidx_start -= smem.vidx_start>0?1:0;
smem.vidx_size = vidx_end - smem.vidx_start;
smem.eid_size = eid_end - smem.eid_start;
smem.processed = 0;
}
__syncthreads();
if(smem.eid_size <= 0) return;
while(smem.processed < smem.vidx_size){
// compute workload for this round
__syncthreads();
if(threadIdx.x==0){
smem.vidx_cur_start = smem.vidx_start + smem.processed;
int rest = smem.vidx_size - smem.processed;
smem.vidx_cur_size = rest > SCRATH ? SCRATH : rest; // limits
smem.processed += smem.vidx_cur_size;
int end_idx = smem.vidx_cur_start + smem.vidx_cur_size;
if(end_idx < assize) smem.chunk_end = __ldg(as.workset.dg_udegree+end_idx) - end_idx;
else smem.chunk_end = smem.eid_start + smem.eid_size;
}
__syncthreads();
// load the values for this round, smem should have enough space
for(int i = tid; i < smem.vidx_cur_size; i += blockDim.x){
int idx = smem.vidx_cur_start + i;
int v = ASProxy<fmt,M>::fetch(as, idx, want);
smem.v[i] = v;
smem.v_degree_scan[i] = __ldg(as.workset.dg_udegree+idx)-idx;
if(v>=0){
smem.v_start_pos[i] = tex1Dfetch<int>(g.dt_start_pos, v);
}
}
__syncthreads();
// compute the interval of this round [block_start, block_end)
int block_start = smem.v_degree_scan[0];
block_start = block_start < smem.eid_start ? smem.eid_start : block_start;
int block_end = smem.chunk_end;
block_end = (block_end > (smem.eid_start + smem.eid_size)) ? (smem.eid_start+smem.eid_size) : block_end;
int block_size = block_end - block_start;
// process the vertices in interleave mode
int vidx,v,v_start_pos,v_degree_scan,ei;
for(int idx=tid; idx < block_size; idx+= blockDim.x){
int eid = block_start + idx;
vidx = __upper_bound(smem.v_degree_scan, smem.vidx_cur_size, eid)-1;
v = smem.v[vidx];
if(v < 0) continue;
v_start_pos = smem.v_start_pos[vidx];
v_degree_scan = smem.v_degree_scan[vidx];
int uidx = eid-v_degree_scan;
int u = __ldg(strict_adj_list+uidx+v_start_pos);
ei = uidx + v_start_pos;
bool toprocess = true;
auto vdata = f.emit(v, g.fetch_edata(ei), g);
// check 1: if idempotent, we can prune the redundant update
if(toprocess && conf.pruning())
toprocess = as.bitmap.mark_duplicate_lite(u);
// check 2: if not pushing to ALL, the target vertex must be inactive
if(toprocess && !conf.conf_toall)
toprocess = f.cond(u, vdata, g);
// if u passes all the checks, do the computation in the functor
if(toprocess){
toprocess = f.compAtomic(f.wa_of(u), vdata, g);
}
// check 3: enqueue u only once (a duplicate enqueue would give a wrong answer)
if(toprocess && !conf.pruning())
toprocess = as.bitmap.mark_duplicate_atomic(u);
// if u is updated successfully, write u to the queue directly
// atomic mode.
if(toprocess){
Qproxy<M>::push(as.queue, u);
}
} //for
} //while
}
template<ASFmt fmt, QueueMode M, typename G, typename F>
__global__ void
__expand_VC_STRICT_wtf(active_set_t as, G g, F f, config_t conf){
const int* __restrict__ strict_adj_list = g.dg_adj_list;
const int assize = ASProxy<fmt,M>::get_size(as);
const int tid = threadIdx.x;
Status want = conf.want();
__shared__ smem_t smem;
if(threadIdx.x==0){
smem.vidx_start = __ldg(as.workset.dg_idx+blockIdx.x);
smem.eid_start = __ldg(as.workset.dg_seid_per_blk+blockIdx.x) - smem.vidx_start;
int vidx_end = __ldg(as.workset.dg_idx+blockIdx.x+1);
int eid_end = __ldg(as.workset.dg_seid_per_blk+blockIdx.x+1) - vidx_end;
smem.vidx_start -= smem.vidx_start>0?1:0;
smem.vidx_size = vidx_end - smem.vidx_start;
smem.eid_size = eid_end - smem.eid_start;
smem.processed = 0;
}
__syncthreads();
if(smem.eid_size <= 0) return;
while(smem.processed < smem.vidx_size){
// compute workload for this round
__syncthreads();
if(threadIdx.x==0){
smem.vidx_cur_start = smem.vidx_start + smem.processed;
int rest = smem.vidx_size - smem.processed;
smem.vidx_cur_size = rest > SCRATH ? SCRATH : rest; // limits
smem.processed += smem.vidx_cur_size;
int end_idx = smem.vidx_cur_start + smem.vidx_cur_size;
if(end_idx < assize) smem.chunk_end = __ldg(as.workset.dg_udegree+end_idx) - end_idx;
else smem.chunk_end = smem.eid_start + smem.eid_size;
}
__syncthreads();
// load the values for this round, smem should have enough space
for(int i = tid; i < smem.vidx_cur_size; i += blockDim.x){
int idx = smem.vidx_cur_start + i;
int v = ASProxy<fmt,M>::fetch(as, idx, want);
smem.v[i] = v;
smem.v_degree_scan[i] = __ldg(as.workset.dg_udegree+idx)-idx;
smem.v_start_pos[i] = tex1Dfetch<int>(g.dt_start_pos, v);
}
__syncthreads();
// compute the interval of this round [block_start, block_end)
int block_start = smem.v_degree_scan[0];
block_start = block_start < smem.eid_start ? smem.eid_start : block_start;
int block_end = smem.chunk_end;
block_end = (block_end > (smem.eid_start + smem.eid_size)) ? (smem.eid_start+smem.eid_size) : block_end;
int block_size = block_end - block_start;
// process the vertices in interleave mode
// int vidx,v,v_start_pos,v_degree_scan,ei;
for(int idx=tid; idx < block_size; idx+= blockDim.x){
int eid = block_start + idx;
int vidx = __upper_bound(smem.v_degree_scan, smem.vidx_cur_size, eid)-1;
int v = smem.v[vidx];
int v_start_pos = smem.v_start_pos[vidx];
int v_degree_scan = smem.v_degree_scan[vidx];
int uidx = eid-v_degree_scan;
int u = __ldg(strict_adj_list+uidx+v_start_pos);
int ei = uidx + v_start_pos;
bool toprocess = true;
// check 1: if idempotent, we can prune the redundant update
if(toprocess)
toprocess = as.bitmap.mark_duplicate_lite(u);
// check 2: if not pushing to ALL, the target vertex must be inactive
if(toprocess && !conf.conf_toall)
toprocess = as.bitmap.is_inactive(u);
// if u passes all the checks, do the computation in the functor
if(toprocess){
auto vdata = f.emit(v, g.fetch_edata(ei), g);
f.compAtomic(f.wa_of(u), vdata, g);
}
} //for
} //while
}
template<ASFmt fmt, QueueMode M, typename G, typename F>
__global__ void
__expand_VC_STRICT(active_set_t as, G g, F f, config_t conf){
const int* __restrict__ strict_adj_list = g.dg_adj_list;
const int assize = ASProxy<fmt,M>::get_size(as);
const int tid = threadIdx.x;
Status want = conf.want();
__shared__ smem_t smem;
if(threadIdx.x==0){
smem.vidx_start = __ldg(as.workset.dg_idx+blockIdx.x);
smem.eid_start = __ldg(as.workset.dg_seid_per_blk+blockIdx.x) - smem.vidx_start;
int vidx_end = __ldg(as.workset.dg_idx+blockIdx.x+1);
int eid_end = __ldg(as.workset.dg_seid_per_blk+blockIdx.x+1) - vidx_end;
smem.vidx_start -= smem.vidx_start>0?1:0;
smem.vidx_size = vidx_end - smem.vidx_start;
smem.eid_size = eid_end - smem.eid_start;
smem.processed = 0;
}
__syncthreads();
if(smem.eid_size <= 0) return;
while(smem.processed < smem.vidx_size){
// compute workload for this round
__syncthreads();
if(threadIdx.x==0){
smem.vidx_cur_start = smem.vidx_start + smem.processed;
int rest = smem.vidx_size - smem.processed;
smem.vidx_cur_size = rest > SCRATH ? SCRATH : rest; // limits
smem.processed += smem.vidx_cur_size;
int end_idx = smem.vidx_cur_start + smem.vidx_cur_size;
if(end_idx < assize) smem.chunk_end = __ldg(as.workset.dg_udegree+end_idx) - end_idx;
else smem.chunk_end = smem.eid_start + smem.eid_size;
}
__syncthreads();
// load the values for this round, smem should have enough space
for(int i = tid; i < smem.vidx_cur_size; i += blockDim.x){
int idx = smem.vidx_cur_start + i;
int v = ASProxy<fmt,M>::fetch(as, idx, want);
smem.v[i] = v;
smem.v_degree_scan[i] = __ldg(as.workset.dg_udegree+idx)-idx;
if(v>=0) smem.v_start_pos[i] = tex1Dfetch<int>(g.dt_start_pos, v);
//}
}
__syncthreads();
// compute the interval of this round [block_start, block_end)
int block_start = smem.v_degree_scan[0];
block_start = block_start < smem.eid_start ? smem.eid_start : block_start;
int block_end = smem.chunk_end;
block_end = (block_end > (smem.eid_start + smem.eid_size)) ? (smem.eid_start+smem.eid_size) : block_end;
int block_size = block_end - block_start;
// process the vertices in interleave mode
// int vidx,v,v_start_pos,v_degree_scan,ei;
for(int idx=tid; idx < block_size; idx+= blockDim.x){
int eid = block_start + idx;
int vidx = __upper_bound(smem.v_degree_scan, smem.vidx_cur_size, eid)-1;
int v = smem.v[vidx];
if(v<0) continue;
int v_start_pos = smem.v_start_pos[vidx];
int v_degree_scan = smem.v_degree_scan[vidx];
int uidx = eid-v_degree_scan;
int u = __ldg(strict_adj_list+uidx+v_start_pos);
int ei = uidx + v_start_pos;
bool toprocess = true;
// check 1: if idempotent, we can prune the redundant update
if(toprocess && conf.pruning())
toprocess = as.bitmap.mark_duplicate_lite(u);
// check 2: if not pushing to ALL, the target vertex must be inactive
if(toprocess && !conf.conf_toall)
toprocess = as.bitmap.is_inactive(u);
// if u passes all the checks, do the computation in the functor
if(toprocess){
auto vdata = f.emit(v, g.fetch_edata(ei), g);
f.compAtomic(f.wa_of(u), vdata, g);
}
} //for
} //while
}
template<ASFmt fmt, QueueMode M, typename G, typename F>
__global__ void
__rexpand_VC_STRICT(active_set_t as, G g, F f, config_t conf){
using edata_t = typename G::edge_t;
using vdata_t = typename F::wa_t;
const int* __restrict__ strict_adj_list = g.directed ? g.dgr_adj_list : g.dg_adj_list;
edata_t* strict_edgedata = g.directed? g.dgr_edgedata : g.dg_edgedata;
const int assize = ASProxy<fmt,M>::get_size(as);
const int gtid = threadIdx.x + blockIdx.x*blockDim.x;
if(assize==0){if(gtid==0) as.halt_device();return;}
const int tid = threadIdx.x;
Status want = conf.want();
__shared__ smem_t smem;
if(threadIdx.x==0){
smem.vidx_start = __ldg(as.workset.dg_idx+blockIdx.x);
smem.eid_start = __ldg(as.workset.dg_seid_per_blk+blockIdx.x) - smem.vidx_start;
int vidx_end = __ldg(as.workset.dg_idx+blockIdx.x+1);
int eid_end = __ldg(as.workset.dg_seid_per_blk+blockIdx.x+1) - vidx_end;
smem.vidx_start -= smem.vidx_start>0?1:0;
smem.vidx_size = vidx_end - smem.vidx_start;
smem.eid_size = eid_end - smem.eid_start;
smem.processed = 0;
}
__syncthreads();
if(smem.eid_size <= 0) return;
while(smem.processed < smem.vidx_size){
// compute workload for this round
__syncthreads();
if(threadIdx.x==0){
smem.vidx_cur_start = smem.vidx_start + smem.processed;
int rest = smem.vidx_size - smem.processed;
smem.vidx_cur_size = rest > SCRATH ? SCRATH : rest; // limits
smem.processed += smem.vidx_cur_size;
int end_idx = smem.vidx_cur_start + smem.vidx_cur_size;
if(end_idx < assize) smem.chunk_end = __ldg(as.workset.dg_udegree+end_idx) - end_idx;
else smem.chunk_end = smem.eid_start + smem.eid_size;
}
__syncthreads();
// load the values for this round, smem should have enough space
for(int i = tid; i < smem.vidx_cur_size; i += blockDim.x){
int idx = smem.vidx_cur_start + i;
int v = ASProxy<fmt,M>::fetch(as, idx, want);
smem.v[i] = v;
smem.v_degree_scan[i] = __ldg(as.workset.dg_udegree+idx)-idx;
if(v>=0){
smem.v_start_pos[i] = tex1Dfetch<int>(g.dt_start_pos, v);
}
}
__syncthreads();
// compute the interval of this round [block_start, block_end)
int block_start = smem.v_degree_scan[0];
block_start = block_start < smem.eid_start ? smem.eid_start : block_start;
int block_end = smem.chunk_end;
block_end = (block_end > (smem.eid_start + smem.eid_size)) ? (smem.eid_start+smem.eid_size) : block_end;
int block_size = block_end - block_start;
//{// process the vertices in interleave mode
// int vidx,v,v_start_pos,v_degree_scan,ei;
// for(int idx=tid; idx < block_size; idx+= blockDim.x){
// int eid = block_start + idx;
// vidx = __upper_bound(smem.v_degree_scan, smem.vidx_cur_size, eid)-1;
// v = smem.v[vidx];
// if(v < 0) continue;
// v_start_pos = smem.v_start_pos[vidx];
// v_degree_scan = smem.v_degree_scan[vidx];
// int uidx = eid-v_degree_scan;
// int u = __ldg(strict_adj_list+uidx+v_start_pos);
// ei = uidx + v_start_pos;
// bool toprocess = true;
// // Data source must be active all conf_fromall is enabled
// if(toprocess && !conf.conf_fromall)
// toprocess = as.bitmap.is_active(u);
// if(toprocess){
// auto vdata = f.emit(u, strict_edgedata+ei, g);
// // this vertex may be processed in other CTAs, thus atomic must remain.
// f.compAtomic(f.wa_of(v), vdata, g);
// }
// } //for
//} // interleave
{ // process the vertices in stride mode
bool remain = false;
bool change = true;
vdata_t reduction;
int vidx,v,v_start_pos,v_degree_scan,limit;
int base = block_size / blockDim.x;
int rest = block_size % blockDim.x;
int thread_workloads = base + ((threadIdx.x < rest) ? 1:0);
int thread_start = (base*threadIdx.x) + ((threadIdx.x < rest) ? threadIdx.x : rest);
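// the partition above is contiguous: the first `rest` threads take (base + 1)
// consecutive edge ids each and the remaining threads take `base`, so thread t
// starts at base*t + min(t, rest); keeping each thread on consecutive edges of
// the same source vertex is what allows the local `reduction` accumulation below.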
for(int idx=thread_start; idx < thread_start+thread_workloads; idx++){
int eid = block_start + idx;
if(change){
vidx = __upper_bound(smem.v_degree_scan, smem.vidx_cur_size, eid)-1;
v = smem.v[vidx];
if(v < 0) continue;
v_start_pos = smem.v_start_pos[vidx];
v_degree_scan = smem.v_degree_scan[vidx];
limit = (vidx==SCRATH-1?block_end:smem.v_degree_scan[vidx+1]);
}
if(eid+1 == limit) change = true;
else change = false;
int uidx = eid-v_degree_scan;
int u = __ldg(strict_adj_list+uidx+v_start_pos);
int ei = uidx + v_start_pos;
bool toprocess = true;
// the data source must be active unless conf_fromall is enabled
if(toprocess && !conf.conf_fromall)
toprocess = as.bitmap.is_active(u);
if(toprocess){
auto vdata = f.emit(u, strict_edgedata+ei, g);
if(!remain) reduction = vdata;
else f.comp(&reduction, vdata, g);
remain = true;
}
if(change && remain) {
// this vertex may be processed by other CTAs, so the atomic update must remain.
f.compAtomic(f.wa_of(v), reduction, g);
remain = false;
}
} //for
if(remain){
f.compAtomic(f.wa_of(v), reduction, g);
remain = false;
}
} // stride
} //while
}
template<>
struct ExpandProxy<VC,STRICT,Push>{
template<typename E, typename F>
static void expand(active_set_t& as, device_graph_t<CSR,E> g, F f, config_t conf){
if(!conf.conf_inherit){
//step 1: init
int nactives = (conf.conf_fuse_inspect?as.get_size_host():as.get_size_host_cached());
if(nactives==0){as.halt_host();return;}
cudaMemset(as.workset.dg_size,0,sizeof(int));
//step 2: prepare the degrees and the scanned degrees
if(as.fmt==Queue){
if(as.queue.mode == Normal) __prepare<Queue,Normal><<<1+conf.ctanum/10, conf.thdnum>>>(as, g, conf);
else __prepare<Queue,Cached><<<1+conf.ctanum/10, conf.thdnum>>>(as, g, conf);
}else __prepare<Bitmap,Normal><<<1+conf.ctanum/10, conf.thdnum>>>(as, g, conf);
mgpu::scan<mgpu::scan_type_exc>(as.workset.dg_degree, nactives, as.workset.dg_udegree, mgpu::plus_t<int>(), as.workset.dg_size, *as.context);
//step 3: compute the sorted block index.
int active_edges = as.workset.get_usize();
int blksz = conf.ctanum;
__memsetIdx<<<1, conf.ctanum>>>(as.workset.dg_seid_per_blk, blksz,
1+active_edges/blksz, active_edges%blksz, active_edges);
mgpu::sorted_search<mgpu::bounds_lower>(as.workset.dg_seid_per_blk, blksz+1,
as.workset.dg_udegree, nactives,
as.workset.dg_idx, mgpu::less_t<int>(), *as.context);
}
if(conf.conf_fuse_inspect){
Launch_Expand_VC(STRICT_fused, as, g, f, conf);
}else {
if(conf.conf_pruning && conf.conf_asfmt==Queue && as.queue.mode==Normal) {
// this path exists only to avoid warp degradation caused by nvcc's code generation
__expand_VC_STRICT_wtf<Queue,Normal><<<conf.ctanum, conf.thdnum>>>(as, g, f, conf);
} else {
Launch_Expand_VC(STRICT, as, g, f, conf);
}
}
//__expand_VC_STRICT<<<conf.ctanum, conf.thdnum>>>(as, g, f, conf);
//cudaThreadSynchronize();
}
template<typename E, typename F>
static void expand(active_set_t as, device_graph_t<COO,E> g, F f, config_t conf){}
};
template<>
struct ExpandProxy<VC,STRICT,Pull>{
template<typename E, typename F>
static void expand(active_set_t& as, device_graph_t<CSR,E> g, F f, config_t conf){
if(!conf.conf_inherit){
// step 1: init
int nactives = as.get_size_host();
cudaMemset(as.workset.dg_size,0,sizeof(int));
// step 2: prepare the degrees and the scanned degrees
if(as.fmt == Queue){
if(as.queue.mode == Normal) __prepare<Queue,Normal><<<1+conf.ctanum/10, conf.thdnum>>>(as, g, conf);
else __prepare<Queue,Cached><<<1+conf.ctanum/10, conf.thdnum>>>(as, g, conf);
}else __prepare<Bitmap,Normal><<<1+conf.ctanum/10, conf.thdnum>>>(as,g,conf);
//mgpu::scan<mgpu::scan_type_exc>(as.workset.dg_degree, nactives, as.workset.dg_udegree, *as.context);
mgpu::scan<mgpu::scan_type_exc>(as.workset.dg_degree, nactives, as.workset.dg_udegree, mgpu::plus_t<int>(), as.workset.dg_size, *as.context);
// step 3: compute the sorted block index.
int active_edges = as.workset.get_usize();
int blksz = conf.ctanum;
__memsetIdx<<<1,conf.ctanum>>>(as.workset.dg_seid_per_blk, blksz,
1+active_edges/blksz, active_edges%blksz, active_edges);
mgpu::sorted_search<mgpu::bounds_lower>(as.workset.dg_seid_per_blk, blksz+1,
as.workset.dg_udegree, nactives,
as.workset.dg_idx, mgpu::less_t<int>(), *as.context);
//as.context->synchronize();
}
Launch_RExpand_VC(STRICT, as, g, f, conf);
//__rexpand_VC_STRICT<<<conf.ctanum, conf.thdnum>>>(as, g, f, conf);
//cudaThreadSynchronize();
}
template<typename E, typename F>
static void expand(active_set_t as, device_graph_t<COO,E> g, F f, config_t conf){}
};
#endif
// ========================== next source file ==========================
/** \test Tests conversion between matrices with different numeric type
**/
//
// *** System
//
#include <iostream>
#include <iomanip>
#include <vector>
//
// *** ViennaCL
//
//#define VIENNACL_DEBUG_ALL
#include "viennacl/backend/memory.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/matrix_proxy.hpp"
template<typename NumericT, typename MatrixT>
int check(std::vector<NumericT> const & std_dest,
std::size_t start1, std::size_t inc1, std::size_t size1,
std::size_t start2, std::size_t inc2, std::size_t size2, std::size_t internal_size2,
MatrixT const & vcl_dest)
{
viennacl::backend::typesafe_host_array<NumericT> tempmat(vcl_dest.handle(), vcl_dest.internal_size());
viennacl::backend::memory_read(vcl_dest.handle(), 0, tempmat.raw_size(), reinterpret_cast<NumericT*>(tempmat.get()));
for (std::size_t i=0; i < size1; ++i)
{
for (std::size_t j=0; j < size2; ++j)
{
NumericT value_std = std_dest[(i*inc1 + start1) * internal_size2 + (j*inc2 + start2)];
NumericT value_dest = vcl_dest.row_major() ? tempmat[(i * vcl_dest.stride1() + vcl_dest.start1()) * vcl_dest.internal_size2() + (j * vcl_dest.stride2() + vcl_dest.start2())]
: tempmat[(i * vcl_dest.stride1() + vcl_dest.start1()) + (j * vcl_dest.stride2() + vcl_dest.start2()) * vcl_dest.internal_size1()];
if (value_std < value_dest || value_std > value_dest)
{
std::cerr << "Failure at row " << i << ", col " << j << ": STL value " << value_std << ", ViennaCL value " << value_dest << std::endl;
return EXIT_FAILURE;
}
}
}
return EXIT_SUCCESS;
}
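// Illustration only (not used by the test): the index arithmetic in check() follows
// the usual strided-layout convention. Element (i, j) of a range/slice that starts
// at (start1, start2) with strides (stride1, stride2) lives at
//   (i*stride1 + start1) * internal_size2 + (j*stride2 + start2)   for row-major,
//   (i*stride1 + start1) + (j*stride2 + start2) * internal_size1   for column-major.
static inline std::size_t strided_index(std::size_t i, std::size_t j,
                                        std::size_t start1, std::size_t stride1,
                                        std::size_t start2, std::size_t stride2,
                                        std::size_t internal_other, bool row_major)
{
  return row_major ? (i*stride1 + start1) * internal_other + (j*stride2 + start2)
                   : (i*stride1 + start1) + (j*stride2 + start2) * internal_other;
}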
//
// -------------------------------------------------------------
//
template<typename STLVectorT1, typename STLVectorT2, typename ViennaCLVectorT1, typename ViennaCLVectorT2 >
int test(STLVectorT1 & std_src, std::size_t start1_src, std::size_t inc1_src, std::size_t size1_src, std::size_t start2_src, std::size_t inc2_src, std::size_t size2_src, std::size_t internal_size2_src,
STLVectorT2 & std_dest, std::size_t start1_dest, std::size_t inc1_dest, std::size_t size1_dest, std::size_t start2_dest, std::size_t inc2_dest, std::size_t size2_dest, std::size_t internal_size2_dest,
ViennaCLVectorT1 const & vcl_src, ViennaCLVectorT2 & vcl_dest)
{
assert(size1_src == size1_dest && bool("Size1 mismatch for STL matrices"));
assert(size2_src == size2_dest && bool("Size2 mismatch for STL matrices"));
assert(vcl_src.size1() == vcl_dest.size1() && bool("Size1 mismatch for ViennaCL matrices"));
assert(vcl_src.size2() == vcl_dest.size2() && bool("Size2 mismatch for ViennaCL matrices"));
assert(size1_src == vcl_src.size1() && bool("Size1 mismatch for STL and ViennaCL matrices"));
assert(size2_src == vcl_src.size2() && bool("Size2 mismatch for STL and ViennaCL matrices"));
typedef typename STLVectorT2::value_type DestNumericT;
for (std::size_t i=0; i<size1_src; ++i)
for (std::size_t j=0; j<size2_src; ++j)
std_dest[(start1_dest + i * inc1_dest) * internal_size2_dest + (start2_dest + j * inc2_dest)] = static_cast<DestNumericT>(std_src[(start1_src + i * inc1_src) * internal_size2_src + (start2_src + j * inc2_src)]);
vcl_dest = vcl_src; // here is the conversion taking place
if (check(std_dest, start1_dest, inc1_dest, size1_dest, start2_dest, inc2_dest, size2_dest, internal_size2_dest, vcl_dest) != EXIT_SUCCESS)
return EXIT_FAILURE;
if (vcl_src.row_major())
{
viennacl::matrix<DestNumericT> A(vcl_src);
if (check(std_dest, start1_dest, inc1_dest, size1_dest, start2_dest, inc2_dest, size2_dest, internal_size2_dest, A) != EXIT_SUCCESS)
return EXIT_FAILURE;
}
else
{
viennacl::matrix<DestNumericT, viennacl::column_major> A(vcl_src);
if (check(std_dest, start1_dest, inc1_dest, size1_dest, start2_dest, inc2_dest, size2_dest, internal_size2_dest, A) != EXIT_SUCCESS)
return EXIT_FAILURE;
}
// --------------------------------------------------------------------------
return EXIT_SUCCESS;
}
inline std::string type_string(unsigned int) { return "unsigned int"; }
inline std::string type_string(int) { return "int"; }
inline std::string type_string(unsigned long) { return "unsigned long"; }
inline std::string type_string(long) { return "long"; }
inline std::string type_string(float) { return "float"; }
inline std::string type_string(double) { return "double"; }
template<typename LayoutT, typename FromNumericT, typename ToNumericT>
int test()
{
int retval = EXIT_SUCCESS;
std::cout << std::endl;
std::cout << "-----------------------------------------------" << std::endl;
std::cout << std::endl;
std::cout << "Conversion test from " << type_string(FromNumericT()) << " to " << type_string(ToNumericT()) << std::endl;
std::cout << std::endl;
std::size_t full_size1 = 578;
std::size_t small_size1 = full_size1 / 4;
std::size_t full_size2 = 687;
std::size_t small_size2 = full_size2 / 4;
//
// Set up STL objects
//
std::vector<FromNumericT> std_src(full_size1 * full_size2);
std::vector<std::vector<FromNumericT> > std_src2(full_size1, std::vector<FromNumericT>(full_size2));
std::vector<std::vector<FromNumericT> > std_src_small(small_size1, std::vector<FromNumericT>(small_size2));
std::vector<ToNumericT> std_dest(std_src.size());
for (std::size_t i=0; i<full_size1; ++i)
for (std::size_t j=0; j<full_size2; ++j)
{
std_src[i * full_size2 + j] = FromNumericT(1.0) + FromNumericT(i) + FromNumericT(j);
std_src2[i][j] = FromNumericT(1.0) + FromNumericT(i) + FromNumericT(j);
if (i < small_size1 && j < small_size2)
std_src_small[i][j] = FromNumericT(1.0) + FromNumericT(i) + FromNumericT(j);
}
//
// Set up ViennaCL objects
//
viennacl::matrix<FromNumericT, LayoutT> vcl_src(full_size1, full_size2);
viennacl::matrix<ToNumericT, LayoutT> vcl_dest(full_size1, full_size2);
viennacl::copy(std_src2, vcl_src);
viennacl::matrix<FromNumericT, LayoutT> vcl_src_small(small_size1, small_size2);
viennacl::copy(std_src_small, vcl_src_small);
viennacl::matrix<ToNumericT, LayoutT> vcl_dest_small(small_size1, small_size2);
std::size_t r11_start = 1 + full_size1 / 4;
std::size_t r11_stop = r11_start + small_size1;
viennacl::range vcl_r11(r11_start, r11_stop);
std::size_t r12_start = 2 * full_size1 / 4;
std::size_t r12_stop = r12_start + small_size1;
viennacl::range vcl_r12(r12_start, r12_stop);
std::size_t r21_start = 2 * full_size2 / 4;
std::size_t r21_stop = r21_start + small_size2;
viennacl::range vcl_r21(r21_start, r21_stop);
std::size_t r22_start = 1 + full_size2 / 4;
std::size_t r22_stop = r22_start + small_size2;
viennacl::range vcl_r22(r22_start, r22_stop);
viennacl::matrix_range< viennacl::matrix<FromNumericT, LayoutT> > vcl_range_src(vcl_src, vcl_r11, vcl_r21);
viennacl::matrix_range< viennacl::matrix<ToNumericT, LayoutT> > vcl_range_dest(vcl_dest, vcl_r12, vcl_r22);
std::size_t s11_start = 1 + full_size1 / 5;
std::size_t s11_inc = 3;
std::size_t s11_size = small_size1;
viennacl::slice vcl_s11(s11_start, s11_inc, s11_size);
std::size_t s12_start = 2 * full_size1 / 5;
std::size_t s12_inc = 2;
std::size_t s12_size = small_size1;
viennacl::slice vcl_s12(s12_start, s12_inc, s12_size);
std::size_t s21_start = 1 + full_size2 / 5;
std::size_t s21_inc = 3;
std::size_t s21_size = small_size2;
viennacl::slice vcl_s21(s21_start, s21_inc, s21_size);
std::size_t s22_start = 2 * full_size2 / 5;
std::size_t s22_inc = 2;
std::size_t s22_size = small_size2;
viennacl::slice vcl_s22(s22_start, s22_inc, s22_size);
viennacl::matrix_slice< viennacl::matrix<FromNumericT, LayoutT> > vcl_slice_src(vcl_src, vcl_s11, vcl_s21);
viennacl::matrix_slice< viennacl::matrix<ToNumericT, LayoutT> > vcl_slice_dest(vcl_dest, vcl_s12, vcl_s22);
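// (Reminder: viennacl::range(start, stop) selects the half-open index window
//  [start, stop), while viennacl::slice(start, inc, size) selects the `size`
//  indices start, start+inc, ..., start+(size-1)*inc; the proxies above thus
//  view small_size1 x small_size2 sub-blocks of the full matrices.)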
//
// Now start running tests for vectors, ranges and slices:
//
std::cout << " ** vcl_src = matrix, vcl_dest = matrix **" << std::endl;
retval = test(std_src, 0, 1, full_size1, 0, 1, full_size2, full_size2,
std_dest, 0, 1, full_size1, 0, 1, full_size2, full_size2,
vcl_src, vcl_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = matrix, vcl_dest = range **" << std::endl;
retval = test(std_src, 0, 1, small_size1, 0, 1, small_size2, full_size2,
std_dest, r12_start, 1, r12_stop - r12_start, r22_start, 1, r22_stop - r22_start, full_size2,
vcl_src_small, vcl_range_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = matrix, vcl_dest = slice **" << std::endl;
retval = test(std_src, 0, 1, small_size1, 0, 1, small_size2, full_size2,
std_dest, s12_start, s12_inc, s12_size, s22_start, s22_inc, s22_size, full_size2,
vcl_src_small, vcl_slice_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
std::cout << " ** vcl_src = range, vcl_dest = matrix **" << std::endl;
retval = test(std_src, r11_start, 1, r11_stop - r11_start, r21_start, 1, r21_stop - r21_start, full_size2,
std_dest, 0, 1, small_size1, 0, 1, small_size2, full_size2,
vcl_range_src, vcl_dest_small);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = range, vcl_dest = range **" << std::endl;
retval = test(std_src, r11_start, 1, r11_stop - r11_start, r21_start, 1, r21_stop - r21_start, full_size2,
std_dest, r12_start, 1, r12_stop - r12_start, r22_start, 1, r22_stop - r22_start, full_size2,
vcl_range_src, vcl_range_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = range, vcl_dest = slice **" << std::endl;
retval = test(std_src, r11_start, 1, r11_stop - r11_start, r21_start, 1, r21_stop - r21_start, full_size2,
std_dest, s12_start, s12_inc, s12_size, s22_start, s22_inc, s22_size, full_size2,
vcl_range_src, vcl_slice_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
std::cout << " ** vcl_src = slice, vcl_dest = matrix **" << std::endl;
retval = test(std_src, s11_start, s11_inc, s11_size, s21_start, s21_inc, s21_size, full_size2,
std_dest, 0, 1, small_size1, 0, 1, small_size2, full_size2,
vcl_slice_src, vcl_dest_small);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = slice, vcl_dest = range **" << std::endl;
retval = test(std_src, s11_start, s11_inc, s11_size, s21_start, s21_inc, s21_size, full_size2,
std_dest, r12_start, 1, r12_stop - r12_start, r22_start, 1, r22_stop - r22_start, full_size2,
vcl_slice_src, vcl_range_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = slice, vcl_dest = slice **" << std::endl;
retval = test(std_src, s11_start, s11_inc, s11_size, s21_start, s21_inc, s21_size, full_size2,
std_dest, s12_start, s12_inc, s12_size, s22_start, s22_inc, s22_size, full_size2,
vcl_slice_src, vcl_slice_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
return EXIT_SUCCESS;
}
template<typename FromNumericT, typename ToNumericT>
int test()
{
int retval = test<viennacl::row_major, FromNumericT, ToNumericT>();
if (retval == EXIT_SUCCESS)
{
retval = test<viennacl::column_major, FromNumericT, ToNumericT>();
if (retval != EXIT_SUCCESS)
std::cerr << "Test failed for column-major!" << std::endl;
}
else
std::cerr << "Test failed for row-major!" << std::endl;
return retval;
}
//
// -------------------------------------------------------------
//
int main()
{
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "## Test :: Type conversion test for matrices " << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
int retval = EXIT_SUCCESS;
//
// from int
//
retval = test<int, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<int, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from unsigned int
//
retval = test<unsigned int, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<unsigned int, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from long
//
retval = test<long, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<long, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from unsigned long
//
retval = test<unsigned long, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<unsigned long, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from float
//
retval = test<float, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<float, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from double
//
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<double, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
std::cout << std::endl;
std::cout << "------- Test completed --------" << std::endl;
std::cout << std::endl;
return retval;
}
// ========================== next source file ==========================
__constant__ float eps=1e-8;
__constant__ float grid_size = 1.0;
__constant__ float distance_empty = 0.4;
// first row: the accepted topology indices (cases of triTable whose triangulation
// uses at most 3 triangles)
// second row: the number of triangles in the corresponding topology
__constant__ int acceptTopology[2][48] = {{1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 19, 25, 31, 32, 34, 35, 38, 47, 48, 49, 50, 51, 55, 59, 63, 64, 68, 70, 76, 79, 96, 98, 100, 102, 103, 110, 111, 112, 115, 118, 119, 127, 0},
{1, 1, 2, 1, 2, 3, 1, 2, 3, 2, 3, 3, 2, 1, 2, 3, 3, 3, 1, 2, 3, 3, 3, 2, 3, 3, 2, 3, 3, 2, 1, 2, 3, 3, 3, 2, 3, 3, 2, 3, 3, 2, 3, 3, 3, 2, 1, 0}};
__constant__ int triTable[256][16] =
{{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}};
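// each row of vertices_to_offset maps one of the 12 cube-edge vertices to the entry
// of the 3xWxHxD displacement field that controls it: {channel (0 = x, 1 = y, 2 = z
// displacement), di, dj, dk}, where (di, dj, dk) is added to the cell index when
// addressing the offset grid (cf. offset_to_vertices_cuda and grad_triangle_to_offset).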
__constant__ int vertices_to_offset[12][4]={ {0, 1, 1, 0}, // #0
{1, 1, 1, 0}, // #1
{0, 1, 0, 0}, // #2
{1, 0, 1, 0}, // #3
{0, 1, 1, 1}, // #4
{1, 1, 1, 1}, // #5
{0, 1, 0, 1}, // #6
{1, 0, 1, 1}, // #7
{2, 0, 1, 1}, // #8
{2, 1, 1, 1}, // #9
{2, 1, 0, 1}, // #10
{2, 0, 0, 1}}; // #11
namespace{
/**
* converts the vertex displacement field to vertex locations
* params:
* offset input, vertex displacement field, 3xWxHxD
* W input, number of cells along one direction
* H input, number of cells along one direction
* D input, number of cells along one direction
* x input, index of the cell in the full grid along one direction
* y input, index of the cell in the full grid along one direction
* z input, index of the cell in the full grid along one direction
* vertices output, locations of the 12 edge vertices of this cell, 3x12
*
*/
template <typename scalar_t>
__device__ void offset_to_vertices_cuda(const scalar_t *offset, const int W, const int H, const int D, const int x, const int y, const int z, scalar_t *vertices){
// #0
vertices[0 ] = 0.5-offset[0 + (x+1)*H*D + (y+1)*D + z ];
vertices[1 ] = 1.0;
vertices[2 ] = 0.0;
// #1
vertices[3 ] = 1.0;
vertices[4 ] = 0.5-offset[1*W*H*D + (x+1)*H*D + (y+1)*D + z ];
vertices[5 ] = 0.0;
// #2
vertices[6 ] = 0.5-offset[0 + (x+1)*H*D + (y )*D + z ];
vertices[7 ] = 0.0;
vertices[8 ] = 0.0;
// #3
vertices[9 ] = 0.0;
vertices[10] = 0.5-offset[1*W*H*D + (x )*H*D + (y+1)*D + z ];
vertices[11] = 0.0;
// #4
vertices[12] = 0.5-offset[0 + (x+1)*H*D + (y+1)*D + z+1 ];
vertices[13] = 1.0;
vertices[14] = 1.0;
// #5
vertices[15] = 1.0;
vertices[16] = 0.5-offset[1*W*H*D + (x+1)*H*D + (y+1)*D + z+1 ];
vertices[17] = 1.0;
// #6
vertices[18] = 0.5-offset[0 + (x+1)*H*D + (y )*D + z+1 ];
vertices[19] = 0.0;
vertices[20] = 1.0;
// #7
vertices[21] = 0.0;
vertices[22] = 0.5-offset[1*W*H*D + (x )*H*D + (y+1)*D + z+1 ];
vertices[23] = 1.0;
// #8
vertices[24] = 0.0;
vertices[25] = 1.0;
vertices[26] = 0.5-offset[2*W*H*D + (x )*H*D + (y+1)*D + z+1 ];
// #9
vertices[27] = 1.0;
vertices[28] = 1.0;
vertices[29] = 0.5-offset[2*W*H*D + (x+1)*H*D + (y+1)*D + z+1 ];
// #10
vertices[30] = 1.0;
vertices[31] = 0.0;
vertices[32] = 0.5-offset[2*W*H*D + (x+1)*H*D + (y )*D + z+1 ];
// #11
vertices[33] = 0.0;
vertices[34] = 0.0;
vertices[35] = 0.5-offset[2*W*H*D + (x )*H*D + (y )*D + z+1 ];
}
/**
* d_sqrdistance/d_x: total derivative (chain rule) of the squared distance
* Q(s,t) = a*s^2 + 2*b*s*t + c*t^2 + 2*d*s + 2*e*t + f,
* where a..f, s and t all depend on the differentiation variable x
* (d_a..d_f, d_s, d_tt denote their derivatives)
*/
template <typename scalar_t>
__device__ scalar_t d_sqrdistance_(scalar_t a, scalar_t b, scalar_t c, scalar_t d, scalar_t e, scalar_t f, scalar_t s, scalar_t t,
scalar_t d_a, scalar_t d_b, scalar_t d_c, scalar_t d_d, scalar_t d_e, scalar_t d_f, scalar_t d_s, scalar_t d_tt){
return d_a*s*s + 2.0*a*d_s*s +
d_c*t*t + 2.0*c*d_tt*t +
2.0*d_b*s*t + 2.0*b*d_s*t + 2*b*s*d_tt +
2.0*d_s*d + 2.0*s*d_d +
2.0*d_e*t + 2.0*e*d_tt + d_f;
}
/**
* d_s/d_x
*/
template <typename scalar_t>
__device__ scalar_t d_s_(scalar_t a, scalar_t b, scalar_t c, scalar_t d, scalar_t e,
scalar_t d_a, scalar_t d_b, scalar_t d_c, scalar_t d_d, scalar_t d_e,
scalar_t s_clamp, scalar_t t_clamp, scalar_t det){
if (s_clamp==0) return 0;
if (s_clamp+t_clamp<=1){
scalar_t d_det = d_a*c + a*d_c - 2.0*b*d_b;
scalar_t det2 = det*det;
if (det2<eps) det2=eps;
return ((d_b*e + b*d_e - d_c*d - c*d_d)*det - (b*e-c*d)*d_det ) / ( det2 );
}else if (s_clamp + t_clamp >1 && t_clamp > 0){
scalar_t tmp = b*e - c*d + b*d - a*e;
return ((d_b*e + b*d_e - d_c*d - c*d_d)*(b*d - a*e) - (b*e-c*d)*(d_b*d + b*d_d - d_a*e - a*d_e) ) / (tmp*tmp);
}else{
return 0;
}
}
/**
* d_t/d_x
*/
template <typename scalar_t>
__device__ scalar_t d_t_(scalar_t a, scalar_t b, scalar_t c, scalar_t d, scalar_t e,
scalar_t d_a, scalar_t d_b, scalar_t d_c, scalar_t d_d, scalar_t d_e,
scalar_t s_clamp, scalar_t t_clamp, scalar_t det){
if (t_clamp==0) return 0;
if (s_clamp+t_clamp<=1){
scalar_t d_det = d_a*c + a*d_c - 2.0*b*d_b;
scalar_t det2 = det*det;
if (det2<eps) det2=eps;
return ((d_b*d + b*d_d - d_a*e - a*d_e)*det - (b*d-a*e)*d_det ) / ( det2 );
}else if (s_clamp + t_clamp >1 && s_clamp > 0){
scalar_t tmp = b*e - c*d + b*d - a*e;
return ((d_b*d + b*d_d - d_a*e - a*d_e)*(b*e - c*d) - (b*d-a*e)*(d_b*e + b*d_e - d_c*d - c*d_d)) / (tmp * tmp);
}else{
return 0;
}
}
/**
* grad_triangle_to_offset
*/
template <typename scalar_t>
__device__ void grad_triangle_to_offset(const scalar_t *grad_triangle, scalar_t *grad_offset, const int W, const int H, const int D, const int i, const int j, const int k, const int t, const scalar_t count){
    // for triangles in a single topology
for (int tri_ind = 0; tri_ind<acceptTopology[1][t]; tri_ind++){
// for vertices on the triangle
for (int vertex_ind = 0; vertex_ind<3; vertex_ind++){
// every vertex only contributes to the gradient of a single variable on the offset map
int topology_ind = acceptTopology[0][t];
int vertex = triTable[topology_ind][tri_ind*3+vertex_ind];
atomicAdd( &grad_offset[vertices_to_offset[vertex][0]*W*H*D +
(vertices_to_offset[vertex][1]+i)*H*D +
(vertices_to_offset[vertex][2]+j)*D +
vertices_to_offset[vertex][3]+k],
-grad_triangle[ tri_ind*9 + vertex_ind*3 + vertices_to_offset[vertex][0] ]/count );
}
}
}
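/*
 * Added note (hedged): the minus sign above comes from the forward mapping
 * vertex = 0.5 - offset[...] in offset_to_vertices_cuda, so
 * d(vertex)/d(offset) = -1; the division by count mirrors the averaging
 * distance_sum/count performed in the forward kernel.
 */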
/*
 * Compute the squared distance between a single point and a single triangle
 * params:
 * triangle: 1D vector, length 3*3, 3 vertices in [0,1,2],[3,4,5],[6,7,8]
 * point: 1D vector, length 3
 * returns: the squared distance (scalar value)
 */
template <typename scalar_t>
__device__ scalar_t point_triangle_distance_forward(const scalar_t *triangle, const scalar_t *point)
{
scalar_t det, s, t, sqrdistance;
scalar_t B[3] = {triangle[0], triangle[1], triangle[2]};
scalar_t E0[3] = {triangle[3]-B[0], triangle[4]-B[1], triangle[5]-B[2]};
scalar_t E1[3] = {triangle[6]-B[0], triangle[7]-B[1], triangle[8]-B[2]};
scalar_t a = E0[0]*E0[0] + E0[1]*E0[1] + E0[2]*E0[2];
scalar_t b = E0[0]*E1[0] + E0[1]*E1[1] + E0[2]*E1[2];
scalar_t c = E1[0]*E1[0] + E1[1]*E1[1] + E1[2]*E1[2];
scalar_t D[3] = {B[0]-point[0], B[1]-point[1], B[2]-point[2]};
scalar_t d = E0[0]*D[0] + E0[1]*D[1] + E0[2]*D[2];
scalar_t e = E1[0]*D[0] + E1[1]*D[1] + E1[2]*D[2];
scalar_t f = D[0]*D[0] + D[1]*D[1] + D[2]*D[2];
det = a*c - b*b;
if (det<eps) det=eps;
s = (b*e - c*d) / det;
t = (b*d - a*e) / det;
if (s<0) s=0;
if (t<0) t=0;
scalar_t norm = s+t;
if (norm>1){
s = s/norm;
t = t/norm;
}
sqrdistance = s * ( a*s + b*t + 2.0*d ) + t * ( b*s + c*t + 2.0*e ) + f;
return sqrdistance;
}
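/*
 * Added note (hedged): (s,t) parametrize the closest point P = B + s*E0 + t*E1
 * in the triangle's plane. Clamping s and t to be non-negative and rescaling
 * them when s+t > 1 projects (s,t) back into the valid triangle parameter
 * range before the squared distance is evaluated.
 */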
/*
 * Backward function: compute the gradient of the squared point-triangle distance
 * w.r.t. the triangle vertices for a single point
 * params:
 * grad_output_: scalar_t, incoming gradient w.r.t. the distance for this point
 * triangle: 1D vector, length 3*3, 3 vertices in [0,1,2],[3,4,5],[6,7,8]
 * point: 1D vector, length 3
 * grad_triangle: output, 1D vector, length 3*3, 3 vertices in [0,1,2],[3,4,5],[6,7,8]
 */
template <typename scalar_t>
__device__ void point_triangle_distance_backward(const scalar_t grad_output_, const scalar_t *triangle, const scalar_t *point, scalar_t *grad_triangle)
{
scalar_t det, s, t;
scalar_t t11, t12, t13, t21, t22, t23, t31, t32, t33;
scalar_t p1, p2, p3;
t11 = triangle[0];
t21 = triangle[1];
t31 = triangle[2];
t12 = triangle[3];
t22 = triangle[4];
t32 = triangle[5];
t13 = triangle[6];
t23 = triangle[7];
t33 = triangle[8];
p1 = point[0];
p2 = point[1];
p3 = point[2];
scalar_t B[3] = {triangle[0], triangle[1], triangle[2]};
scalar_t E0[3] = {triangle[3]-B[0], triangle[4]-B[1], triangle[5]-B[2]};
scalar_t E1[3] = {triangle[6]-B[0], triangle[7]-B[1], triangle[8]-B[2]};
scalar_t a = E0[0]*E0[0] + E0[1]*E0[1] + E0[2]*E0[2];
scalar_t b = E0[0]*E1[0] + E0[1]*E1[1] + E0[2]*E1[2];
scalar_t c = E1[0]*E1[0] + E1[1]*E1[1] + E1[2]*E1[2];
scalar_t d_t11,d_t21,d_t31,d_t12,d_t22,d_t32,d_t13,d_t23,d_t33;
d_t11=d_t21=d_t31=d_t12=d_t22=d_t32=d_t13=d_t23=d_t33 = 0;
scalar_t D[3] = {B[0]-p1, B[1]-p2, B[2]-p3};
scalar_t d = E0[0]*D[0] + E0[1]*D[1] + E0[2]*D[2];
scalar_t e = E1[0]*D[0] + E1[1]*D[1] + E1[2]*D[2];
scalar_t f = D[0]*D[0] + D[1]*D[1] + D[2]*D[2];
det = a*c - b*b;
if (det<eps) det=eps;
s = (b*e - c*d) / det;
t = (b*d - a*e) / det;
scalar_t d_a,d_b,d_c,d_d,d_e,d_f;
scalar_t s_clamp = s;
scalar_t t_clamp = t;
if (s<0) s_clamp=0;
if (t<0) t_clamp=0;
scalar_t s_norm = s_clamp;
scalar_t t_norm = t_clamp;
scalar_t norm = s_clamp+t_clamp;
if (norm>1){
s_norm = s_clamp/norm;
t_norm = t_clamp/norm;
}
// t11
d_a = 2*t11 - 2*t12; d_b = 2*t11 - t12 - t13; d_c = 2*t11 - 2*t13; d_d = p1 - 2*t11 + t12; d_e = p1 - 2*t11 + t13; d_f = 2*t11 - 2*p1;
d_t11 += grad_output_ * d_sqrdistance_(a,b,c,d,e,f,s_norm,t_norm, d_a,d_b,d_c,d_d,d_e,d_f, d_s_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det), d_t_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det));
// t21
d_a = 2*t21 - 2*t22; d_b = 2*t21 - t22 - t23; d_c = 2*t21 - 2*t23; d_d = p2 - 2*t21 + t22; d_e = p2 - 2*t21 + t23; d_f = 2*t21 - 2*p2;
d_t21 += grad_output_ * d_sqrdistance_(a,b,c,d,e,f,s_norm,t_norm, d_a,d_b,d_c,d_d,d_e,d_f, d_s_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det), d_t_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det));
// t31
d_a = 2*t31 - 2*t32; d_b = 2*t31 - t32 - t33; d_c = 2*t31 - 2*t33; d_d = p3 - 2*t31 + t32; d_e = p3 - 2*t31 + t33; d_f = 2*t31 - 2*p3;
d_t31 += grad_output_ * d_sqrdistance_(a,b,c,d,e,f,s_norm,t_norm, d_a,d_b,d_c,d_d,d_e,d_f, d_s_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det), d_t_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det));
// t12
d_a = 2*t12 - 2*t11; d_b = t13 - t11; d_c = 0.0; d_d = t11 - p1; d_e = 0.0; d_f = 0.0;
d_t12 += grad_output_ * d_sqrdistance_(a,b,c,d,e,f,s_norm,t_norm, d_a,d_b,d_c,d_d,d_e,d_f, d_s_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det), d_t_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det));
// t22
d_a = 2*t22 - 2*t21; d_b = t23 - t21; d_c = 0.0; d_d = t21 - p2; d_e = 0.0; d_f = 0.0;
d_t22 += grad_output_ * d_sqrdistance_(a,b,c,d,e,f,s_norm,t_norm, d_a,d_b,d_c,d_d,d_e,d_f, d_s_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det), d_t_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det));
// t32
d_a = 2*t32 - 2*t31; d_b = t33 - t31; d_c = 0.0; d_d = t31 - p3; d_e = 0.0; d_f = 0.0;
d_t32 += grad_output_ * d_sqrdistance_(a,b,c,d,e,f,s_norm,t_norm, d_a,d_b,d_c,d_d,d_e,d_f, d_s_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det), d_t_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det));
// t13
d_a = 0.0; d_b = t12 - t11; d_c = 2*t13 - 2*t11; d_d = 0.0; d_e = t11 - p1; d_f = 0.0;
d_t13 += grad_output_ * d_sqrdistance_(a,b,c,d,e,f,s_norm,t_norm, d_a,d_b,d_c,d_d,d_e,d_f, d_s_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det), d_t_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det));
// t23
d_a = 0.0; d_b = t22 - t21; d_c = 2*t23 - 2*t21; d_d = 0.0; d_e = t21 - p2; d_f = 0.0;
d_t23 += grad_output_ * d_sqrdistance_(a,b,c,d,e,f,s_norm,t_norm, d_a,d_b,d_c,d_d,d_e,d_f, d_s_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det), d_t_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det));
// t33
d_a = 0.0; d_b = t32 - t31; d_c = 2*t33 - 2*t31; d_d = 0.0; d_e = t31 - p3; d_f = 0.0;
d_t33 += grad_output_ * d_sqrdistance_(a,b,c,d,e,f,s_norm,t_norm, d_a,d_b,d_c,d_d,d_e,d_f, d_s_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det), d_t_(a,b,c,d,e, d_a,d_b,d_c,d_d,d_e, s_clamp,t_clamp,det));
grad_triangle[0] = d_t11;
grad_triangle[1] = d_t21;
grad_triangle[2] = d_t31;
grad_triangle[3] = d_t12;
grad_triangle[4] = d_t22;
grad_triangle[5] = d_t32;
grad_triangle[6] = d_t13;
grad_triangle[7] = d_t23;
grad_triangle[8] = d_t33;
}
/*
 * CUDA kernel, parallelized over cells (one block per cell) and topologies (one thread per topology)
*/
template <typename scalar_t>
__global__ void point_toplogy_distance_kernel(const scalar_t *offset, const scalar_t *points, scalar_t *distances, int *indices, const int n){
// topology
int t = threadIdx.x;
int topology_ind = acceptTopology[0][t];
int T = blockDim.x + 1;
// cell indices
int i = blockIdx.x;
int j = blockIdx.y;
int k = blockIdx.z;
// cell size
int Wc = gridDim.x;
int Hc = gridDim.y;
int Dc = gridDim.z;
int ind = i*Hc*Dc + j*Dc + k;
    // offset size; note that we always have a 3x(W+1)x(H+1)x(D+1) offset map for a WxHxD grid
int W = Wc + 1;
int H = Hc + 1;
int D = Dc + 1;
// offset_to_vertices
scalar_t vertices[12*3];
offset_to_vertices_cuda(offset, W, H, D, i, j, k, vertices);
//scalar_t *triangle = offset_to_triangles(offset, i, j, k, t);
scalar_t distance_sum=0.0;
scalar_t count=0;
for (int p=0; p<n; p++){
scalar_t px = points[p*3+0];
scalar_t py = points[p*3+1];
scalar_t pz = points[p*3+2];
        // if the point falls inside the current cell
if (px >= i && px < i+grid_size && py >= j && py < j+grid_size && pz >= k && pz < k+grid_size){
// min distance to a triangle in the same topology
            // also save the index of the nearest triangle for back-propagation
scalar_t min_distance = 10000.0;
long int min_indice = -1;
for (int tri_ind = 0; tri_ind<acceptTopology[1][t]; tri_ind++){
// offset_to_triangles
                // Note: offset_to_triangles is inlined inside the loop to avoid dynamically allocating memory, unlike the CPU version
scalar_t triangle_single[3*3] = { // v1
vertices[triTable[topology_ind][tri_ind*3+0]*3 + 0] + scalar_t(i),
vertices[triTable[topology_ind][tri_ind*3+0]*3 + 1] + scalar_t(j),
vertices[triTable[topology_ind][tri_ind*3+0]*3 + 2] + scalar_t(k),
// v2
vertices[triTable[topology_ind][tri_ind*3+1]*3 + 0] + scalar_t(i),
vertices[triTable[topology_ind][tri_ind*3+1]*3 + 1] + scalar_t(j),
vertices[triTable[topology_ind][tri_ind*3+1]*3 + 2] + scalar_t(k),
// v3
vertices[triTable[topology_ind][tri_ind*3+2]*3 + 0] + scalar_t(i),
vertices[triTable[topology_ind][tri_ind*3+2]*3 + 1] + scalar_t(j),
vertices[triTable[topology_ind][tri_ind*3+2]*3 + 2] + scalar_t(k) };
scalar_t point_single[3] = {px, py, pz};
scalar_t distance_single = point_triangle_distance_forward( triangle_single, point_single );
if (distance_single < min_distance){
min_distance = distance_single;
min_indice = tri_ind;
}
}
indices[p*T + t] = min_indice;
distance_sum += min_distance;
count += 1;
}
}
    // if at least one point fell inside the current cell
if (count>0) {
distances[ind*T + t] = distance_sum/count;
} else {
distances[ind*T + t] = distance_empty;
}
__syncthreads();
}
/*
 * CUDA kernel, parallelized over cells (one block per cell) and topologies (one thread per topology)
*/
template <typename scalar_t>
__global__ void grad_point_toplogy_distance_kernel(const scalar_t *grad_output, const scalar_t *offset, const scalar_t *points, const int *indices, scalar_t *grad_offset, const int n){
// topology
int t = threadIdx.x;
int T = blockDim.x + 1;
int topology_ind = acceptTopology[0][t];
// cell indices
int i = blockIdx.x;
int j = blockIdx.y;
int k = blockIdx.z;
// cell size
int Wc = gridDim.x;
int Hc = gridDim.y;
int Dc = gridDim.z;
int ind = i*Hc*Dc + j*Dc + k;
int grad_ind = ind*T + t;
const scalar_t grad_output_element = grad_output[grad_ind];
//printf("%d %d %d, %d, grad_output_element %f\n", i, j, k, grad_ind, grad_output_element );
    // offset size; note that we always have a 3x(W+1)x(H+1)x(D+1) offset map for a WxHxD grid
int W = Wc + 1;
int H = Hc + 1;
int D = Dc + 1;
// offset_to_vertices
scalar_t vertices[12*3];
offset_to_vertices_cuda(offset, W, H, D, i, j, k, vertices);
//scalar_t *triangle = offset_to_triangles(offset, i, j, k, t);
scalar_t count=0;
// allocate memory for accumulating the gradients
// assuming maximum number of triangles for each topology is 4 as in Marching Cubes
scalar_t grad_triangle_all[4*3*3] = {0};
for (int p=0; p<n; p++){
scalar_t px = points[p*3+0];
scalar_t py = points[p*3+1];
scalar_t pz = points[p*3+2];
        // if the point falls inside the current cell
if (px >= scalar_t(i) && px < scalar_t(i)+grid_size && py >= scalar_t(j) && py < scalar_t(j)+grid_size && pz >= scalar_t(k) && pz < scalar_t(k)+grid_size){
// printf("(%f %f %f) in [%f %f %f]\n", px, py, pz, scalar_t(i), scalar_t(j), scalar_t(k) );
// printf("grad_output_element %f\n", grad_output_element );
// printf("grad_output_element index: %d*%d + %d = %d\n", ind, T, t, ind*T+t);
// only back propagate to the nearest triangle
int tri_ind = indices[p*T + t];
if (tri_ind == -1) continue;
// offset_to_triangles
            // Note: offset_to_triangles is inlined inside the loop to avoid dynamically allocating memory, unlike the CPU version
scalar_t triangle_single[3*3] = { // v1
vertices[triTable[topology_ind][tri_ind*3+0]*3 + 0] + scalar_t(i),
vertices[triTable[topology_ind][tri_ind*3+0]*3 + 1] + scalar_t(j),
vertices[triTable[topology_ind][tri_ind*3+0]*3 + 2] + scalar_t(k),
// v2
vertices[triTable[topology_ind][tri_ind*3+1]*3 + 0] + scalar_t(i),
vertices[triTable[topology_ind][tri_ind*3+1]*3 + 1] + scalar_t(j),
vertices[triTable[topology_ind][tri_ind*3+1]*3 + 2] + scalar_t(k),
// v3
vertices[triTable[topology_ind][tri_ind*3+2]*3 + 0] + scalar_t(i),
vertices[triTable[topology_ind][tri_ind*3+2]*3 + 1] + scalar_t(j),
vertices[triTable[topology_ind][tri_ind*3+2]*3 + 2] + scalar_t(k) };
scalar_t point_single[3] = {px, py, pz};
scalar_t grad_triangle[3*3];
//point_triangle_distance_backward(grad_output[ind*T + t], triangle_single, point_single, grad_triangle);
point_triangle_distance_backward(grad_output_element, triangle_single, point_single, grad_triangle);
// accumulate gradients over all the points for each triangle
// to reduce times of updating global memory
for (int gi=0; gi<9; gi++){
grad_triangle_all[tri_ind*9 + gi] += grad_triangle[gi];
}
count += 1.0;
}
}
if (count<1.0) return;
//
grad_triangle_to_offset(grad_triangle_all, grad_offset, W, H, D, i, j, k, t, count);
}
/*
* check all distances and assign a large loss to the empty topology if the cell is not empty
* params:
 * distances    point-to-mesh distances for every cell and topology, (WxHxD)xT
 * T            number of all acceptable topologies (the last one is the empty topology)
*/
template <typename scalar_t>
__global__ void update_empty_topology(scalar_t *distances, const int T){
// cell indices
int i = blockIdx.x;
int j = blockIdx.y;
int k = blockIdx.z;
// cell size
int Hc = gridDim.y;
int Dc = gridDim.z;
int ind = i*Hc*Dc + j*Dc + k;
int empty=1;
scalar_t max_distance = -1.0;
for (int t=0; t<T-1; t++){
scalar_t d = distances[ind*T + t];
if (d!=distance_empty) empty = 0;
if (d>max_distance) max_distance = d;
}
if (empty==0) {
distances[ind*T + T-1] = max_distance*10.0;
}
__syncthreads();
}
} //namespace
/*
* Forward function, calculating the point to mesh distances for all grids
* params:
* offset input, offset map for x,y,z directions, 3x(W+1)x(H+1)x(D+1)
* points input, all points, N_allx3
 * distances output, point-to-mesh distances for every cell for every topology, (WxHxD)xT
* indices_all output, to record which triangle in each topology is the nearest one for backpropagation, N_allxT
*/
void point_topology_distance_kernel_forward(
at::Tensor offset,
at::Tensor points,
at::Tensor distances,
at::Tensor indices_all){
int W = offset.size(1)-1;
int H = offset.size(2)-1;
int D = offset.size(3)-1;
int T = distances.size(1);
dim3 dimGrid(W, H, D);
dim3 dimBlock(T-1, 1, 1);
int n = points.size(0);
assert(offset.type().scalarType() == at::ScalarType::Float);
assert(points.type().scalarType() == at::ScalarType::Float);
assert(distances.type().scalarType() == at::ScalarType::Float);
assert(indices_all.type().scalarType() == at::ScalarType::Int);
  // launch the kernel
point_toplogy_distance_kernel<float><<< dimGrid, dimBlock>>>(
offset.data<float>(),
points.data<float>(),
distances.data<float>(),
indices_all.data<int>(),
n);
update_empty_topology<float><<<dimGrid, 1>>>(
distances.data<float>(),
T);
}
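/*
 * Hedged usage sketch (not part of the original source). The tensor names and
 * the calling context are assumptions, added only to illustrate the layout the
 * function above expects for a WxHxD grid with T topologies and N points:
 *
 *   at::Tensor offset      -> float CUDA tensor, shape 3 x (W+1) x (H+1) x (D+1)
 *   at::Tensor points      -> float CUDA tensor, shape N x 3
 *   at::Tensor distances   -> float CUDA tensor, shape (W*H*D) x T
 *   at::Tensor indices_all -> int   CUDA tensor, shape N x T
 *   point_topology_distance_kernel_forward(offset, points, distances, indices_all);
 */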
/*
* Backward function, calculating the gradients for the full offset map
* params:
* grad_output input, gradient on the output distances, (WxHxD)xT
* offset input, offset map for x,y,z directions, 3x(W+1)x(H+1)x(D+1)
* points input, all points, N_allx3
* indices_all input, recorded which triangle in each topology is the nearest one for backpropagation, N_allxT
* grad_offset output, gradient on the full offset map, 3x(W+1)x(H+1)x(D+1)
*
*/
void point_topology_distance_kernel_backward(
at::Tensor grad_output,
at::Tensor offset,
at::Tensor points,
at::Tensor indices_all,
at::Tensor grad_offset){
int W = offset.size(1) - 1;
int H = offset.size(2) - 1;
int D = offset.size(3) - 1;
int T = grad_output.size(1);
dim3 dimGrid(W, H, D);
dim3 dimBlock(T-1, 1, 1);
int n = points.size(0);
assert(offset.type().scalarType() == at::ScalarType::Float);
assert(points.type().scalarType() == at::ScalarType::Float);
assert(grad_output.type().scalarType() == at::ScalarType::Float);
assert(indices_all.type().scalarType() == at::ScalarType::Int);
assert(grad_offset.type().scalarType() == at::ScalarType::Float);
  // launch the kernel
grad_point_toplogy_distance_kernel<float><<<dimGrid, dimBlock>>>(
grad_output.data<float>(),
offset.data<float>(),
points.data<float>(),
indices_all.data<int>(),
grad_offset.data<float>(),
n);
}
// Thrust Dependencies
#include <thrust/random.h>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
// GL Dependency
#include <glm/gtc/matrix_transform.hpp>
// Octree-SLAM Dependencies
#include <octree_slam/rendering/rasterize_kernels.h>
#include <octree_slam/rendering/rasterize_tools.h>
#include <octree_slam/cuda_common_kernels.h>
#define SHOWBODY 0
#define SHOWLINES 0
#define SHOWVERTICES 0
namespace octree_slam {
namespace rendering {
glm::vec3* framebuffer;
fragment* depthbuffer;
float* device_vbo;
float* device_cbo;
int* device_ibo;
float* device_nbo;
triangle* primitives;
triangle* primitives2;
glm::vec3 lightDir;
bmp_texture* device_tex;
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Handy dandy little hashing function that provides seeds for random number generation
__host__ __device__ unsigned int hash(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
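// Added note (hedged): a hash like this is typically used to seed a per-thread
// thrust::default_random_engine, e.g.
//   thrust::default_random_engine rng(hash(index));
// so that each thread draws from a decorrelated random sequence; whether this
// file still uses it that way is an assumption.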
//Writes a given fragment to a fragment buffer at a given location
__host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
depthbuffer[index] = frag;
}
}
//Reads a fragment from a given location in a fragment buffer
__host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
return depthbuffer[index];
}else{
fragment f;
return f;
}
}
//Writes a given pixel to a pixel buffer at a given location
__host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
framebuffer[index] = value;
}
}
//Reads a pixel from a pixel buffer at a given location
__host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
return framebuffer[index];
}else{
return glm::vec3(0,0,0);
}
}
//Kernel that clears a given pixel buffer with a given color
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
  if(x<resolution.x && y<resolution.y){
image[index] = color;
}
}
//Kernel that clears a given fragment buffer with a given fragment
__global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
  if(x<resolution.x && y<resolution.y){
fragment f = frag;
f.position.x = x;
f.position.y = y;
buffer[index] = f;
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
  if(x<resolution.x && y<resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
    // Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
__global__ void vertexShadeKernel(float* vbo, int vbosize, float* nbo, int nbosize, glm::vec2 resolution, float zNear, float zFar, glm::mat4 projection, glm::mat4 view){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index<vbosize/3){
int idxX = 3*index;
int idxY = idxX+1;
int idxZ = idxX+2;
//Transform position
glm::vec4 pos = glm::vec4(vbo[idxX],vbo[idxY],vbo[idxZ],1.0f);
glm::mat4 transformationMatrix = projection * view * glm::mat4();
pos = transformationMatrix * pos;
glm::vec4 new_pos = pos/pos.w;
vbo[idxX] = -new_pos.x;
vbo[idxY] = new_pos.y;
vbo[idxZ] = new_pos.z;
//Transform vertices
vbo[idxX] = (resolution.x/2.0f) * (vbo[idxX]+1.0f);
vbo[idxY] = (resolution.y/2.0f) * (vbo[idxY]+1.0f);
vbo[idxZ] = ((zFar - zNear)/2.0f) * (vbo[idxZ]+1.0f);
//Transform normal
glm::vec4 originnormal = glm::vec4(nbo[idxX],nbo[idxY],nbo[idxZ],0.0f);
//originnormal = projection * originnormal;
nbo[idxX] = originnormal.x;
nbo[idxY] = originnormal.y;
nbo[idxZ] = originnormal.z;
}
}
__global__ void primitiveAssemblyKernel(float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, float* nbo, int nbosize, triangle* primitives){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int primitivesCount = ibosize/3;
if(index<primitivesCount){
int v0 = ibo[index*3];
int v1 = ibo[index*3+1];
int v2 = ibo[index*3+2];
glm::vec3 c0(cbo[0], cbo[1], cbo[2]);
glm::vec3 c1(cbo[3], cbo[4], cbo[5]);
glm::vec3 c2(cbo[6], cbo[7], cbo[8]);
glm::vec3 p0(vbo[v0*3], vbo[v0*3+1], vbo[v0*3+2]);
glm::vec3 p1(vbo[v1*3], vbo[v1*3+1], vbo[v1*3+2]);
glm::vec3 p2(vbo[v2*3], vbo[v2*3+1], vbo[v2*3+2]);
glm::vec3 n0(nbo[v0*3], nbo[v0*3+1], nbo[v0*3+2]);
glm::vec3 n1(nbo[v1*3], nbo[v1*3+1], nbo[v1*3+2]);
glm::vec3 n2(nbo[v2*3], nbo[v2*3+1], nbo[v2*3+2]);
primitives[index].c0 = c0;
primitives[index].c1 = c1;
primitives[index].c2 = c2;
primitives[index].n0 = n0;
primitives[index].n1 = n1;
primitives[index].n2 = n2;
primitives[index].p0 = p0;
primitives[index].p1 = p1;
primitives[index].p2 = p2;
}
}
//Thrust predicate used to cull backfacing triangles
struct check_triangle {
__host__ __device__
bool operator() (const triangle& t) {
float x1 = t.p1.x - t.p0.x;
float y1 = t.p1.y - t.p0.y;
float x2 = t.p2.x - t.p0.x;
float y2 = t.p2.y - t.p0.y;
return ((x1*y2 - y1*x2) < 0.0f);
}
};
//Host function that culls backfacing primitives before rasterization
__host__ void culling(triangle* primitives, triangle* new_primitives, int& numPrimitives) {
thrust::device_ptr<triangle> in = thrust::device_pointer_cast<triangle>(primitives);
thrust::device_ptr<triangle> out = thrust::device_pointer_cast<triangle>(new_primitives);
numPrimitives = thrust::copy_if(in, in + numPrimitives, out, check_triangle()) - out;
}
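// Added note: thrust::copy_if returns an iterator one past the last element it
// wrote to 'out', so subtracting 'out' yields the number of surviving
// primitives (those for which check_triangle returns true); that count is
// written back through numPrimitives and later used to size the
// rasterization kernel launches.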
__device__ glm::vec2 scanLineTriangleIntersect(glm::vec2 p1,glm::vec2 p2,glm::vec2 q1,glm::vec2 q2,glm::vec2 q3) {
float min_t = 1.0f, max_t = 0.0f;
glm::vec2 scanLine = p2 - p1;
glm::vec2 triLine1 = q2 - q1;
glm::vec2 triLine2 = q3 - q2;
glm::vec2 triLine3 = q1 - q3;
float crossValue[3]={0};
//-------------------------------
glm::vec2 cutLine = q1 - p1;
crossValue[0] = scanLine.x*triLine1.y - scanLine.y*triLine1.x;
crossValue[1] = cutLine.x*triLine1.y - triLine1.x*cutLine.y;
crossValue[2] = cutLine.x*scanLine.y - scanLine.x*cutLine.y;
if(abs(crossValue[0]) > 0.0001){
float t = crossValue[1]/crossValue[0];
float u = crossValue[2]/crossValue[0];
if(u>0 && u<1 && t>0 && t<1){
min_t = glm::min(t, min_t);
max_t = glm::max(t, max_t);
}
}
cutLine = q2 - p1;
crossValue[0] = scanLine.x*triLine2.y - scanLine.y*triLine2.x;
crossValue[1] = cutLine.x*triLine2.y - triLine2.x*cutLine.y;
crossValue[2] = cutLine.x*scanLine.y - scanLine.x*cutLine.y;
if(abs(crossValue[0]) > 0.0001){
float t = crossValue[1]/crossValue[0];
float u = crossValue[2]/crossValue[0];
if(u>0 && u<1 && t>0 && t<1){
min_t = glm::min(t, min_t);
max_t = glm::max(t, max_t);
}
}
cutLine = q3 - p1;
crossValue[0] = scanLine.x*triLine3.y - scanLine.y*triLine3.x;
crossValue[1] = cutLine.x*triLine3.y - triLine3.x*cutLine.y;
crossValue[2] = cutLine.x*scanLine.y - scanLine.x*cutLine.y;
if(abs(crossValue[0]) > 0.0001){
float t = crossValue[1]/crossValue[0];
float u = crossValue[2]/crossValue[0];
if(u>0 && u<1 && t>0 && t<1){
min_t = glm::min(t, min_t);
max_t = glm::max(t, max_t);
}
}
return glm::vec2(min_t, max_t);
}
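/*
 * Added note (hedged): each of the three blocks above intersects the scanline
 * segment p1->p2 with one triangle edge by solving
 *     p1 + t*scanLine = q + u*edge
 * via 2D cross products (crossValue[0] is cross(scanLine, edge), and t, u are
 * ratios of cross products); an intersection interior to both segments
 * (0 < t < 1, 0 < u < 1) widens the [min_t, max_t] span of the scanline that
 * the triangle covers.
 */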
//Interpolation by Barycentric Coordinates
//reference...http://mathworld.wolfram.com/BarycentricCoordinates.html
__device__ glm::vec3 bcInterpolate(glm::vec3 BC, glm::vec3 e1,glm::vec3 e2,glm::vec3 e3) {
return BC.x * e1+ BC.y * e2 + BC.z * e3;
}
__global__ void rasterizationKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, float zNear, float zFar, bool barycenter){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index<primitivesCount){
glm::vec3 primMin, primMax;
triangle thisTri = primitives[index];
getAABBForTriangle(thisTri, primMin, primMax);
fragment frag;
frag.primIndex = index;
    for(int j=glm::max(primMin.y,0.0f); j<glm::min(primMax.y,resolution.y)+1; j++){//scan from the bottom to the top, find the left and right intersection points with the triangle and draw the span between them
glm::vec2 portion = scanLineTriangleIntersect(glm::vec2(primMin.x, float(j)),glm::vec2(primMax.x, float(j)),glm::vec2(thisTri.p0),glm::vec2(thisTri.p1),glm::vec2(thisTri.p2));
int leftX = primMin.x + portion.x * (primMax.x - primMin.x);
int rightX = primMin.x + portion.y * (primMax.x - primMin.x) + 1; //have to add 1 for the last X
for(int i=glm::max(leftX,0);i<glm::min(rightX,(int)resolution.x);i++){
int screenPointIndex = (resolution.y - (j+1)) * resolution.x + (i-1);
glm::vec3 bary = calculateBarycentricCoordinate (thisTri, glm::vec2(i,j));
if (isBarycentricCoordInBounds(bary)){ //barycenter triangle interpolation
frag.position.x = i;
frag.position.y = j;
frag.position.z = getZAtCoordinate(bary, thisTri);
if(barycenter){
frag.color = bcInterpolate(bary, thisTri.c0, thisTri.c1, thisTri.c2);
//normal as color: (for normal value test)
//frag.color = bcInterpolate(bary, thisTri.n0, thisTri.n1, thisTri.n2);
frag.normal = bcInterpolate(bary, thisTri.n0, thisTri.n1, thisTri.n2);
frag.normal = glm::normalize(frag.normal);
}
else{
frag.color = (thisTri.c0+thisTri.c1+thisTri.c2)/3.0f;
frag.normal = (thisTri.n0+thisTri.n1+thisTri.n2)/3.0f;
frag.normal = glm::normalize(frag.normal);
}
//show the most front primitive
if (depthbuffer[screenPointIndex].position.z<frag.position.z && frag.position.z<-zNear && frag.position.z>-zFar){
depthbuffer[screenPointIndex] = frag;
depthbuffer[screenPointIndex].depth = 1;
}
}
}
}
}
}
//Show Lines
__global__ void linesRasterizeKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, float zNear, float zFar){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index<primitivesCount){
glm::vec3 primMin, primMax;
triangle thisTri = primitives[index];
getAABBForTriangle(thisTri, primMin, primMax);
fragment frag;
if(primMin.x>resolution.x || primMin.y>resolution.y || primMin.z>zFar || primMax.x<0 || primMax.y<0 ||primMax.z<zNear) //prim outside of the screen
return;
else{
int startY = glm::max(int(primMin.y), 0);
int endY = glm::min(int(primMax.y), (int)resolution.y);
int startX = glm::max(int(primMin.x), 0);
int endX = glm::min(int(primMax.x), (int)resolution.x);
for(int j=startY; j<endY; j++){
glm::vec2 intersectPortion = scanLineTriangleIntersect(glm::vec2(startX, float(j)),glm::vec2(endX, float(j)),glm::vec2(thisTri.p0),glm::vec2(thisTri.p1),glm::vec2(thisTri.p2));
float left = intersectPortion.x;
float right = intersectPortion.y;
int leftstartX = startX + (endX-startX)*(float)left;
int rightendX = startX + (endX-startX)*(float)right;
for(int i=leftstartX; i<rightendX; i++){
int screenPointIndex = ((resolution.y - j -1)*resolution.x) + i - 1;
if(i>0 && j>0 && i<resolution.x && j<resolution.y){
glm::vec3 baryCoord = calculateBarycentricCoordinate (thisTri, glm::vec2 (i,j));
if (!isBarycentricCoordInBounds(baryCoord)){ //show lines
// Interpolation by BC
frag.position = bcInterpolate(baryCoord,thisTri.p0,thisTri.p1,thisTri.p2);
frag.position.z = -frag.position.z;
frag.color = glm::vec3(0,1,0);
frag.normal = glm::vec3(0,0,1);
//show the most front primitive
if (frag.position.z<-zNear&&frag.position.z>-zFar)
depthbuffer[screenPointIndex] = frag;
}
}
}
}
}
}
}
//Show Vertices
__global__ void verticesRasterizeKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, float zNear, float zFar){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index<primitivesCount){
glm::vec3 point;
//for each primitive, get each point, assign color and normal to the buffer
for(int k=0; k<3; k++){
if(k==0) point = primitives[index].p0;
else if(k==1) point = primitives[index].p1;
else point = primitives[index].p2;
int i = glm::round(point.x);
int j = glm::round(point.y);
if(i>0 && j>0 && i<resolution.x && j<resolution.y){
int depthIndex = ((resolution.y - j -1)*resolution.x) + i - 1;
fragment frag;
frag.position = glm::vec3(point.x, point.y, point.z);
frag.normal = glm::vec3(0,0,1);
frag.color = glm::vec3(1,1,1);
if (frag.position.z>zNear&&frag.position.z<zFar)
depthbuffer[depthIndex] = frag;
}
}
}
}
__global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution, glm::vec3 *lightDir, bmp_texture *tex, glm::vec3 *device_data){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
  if(x<resolution.x && y<resolution.y){
if (depthbuffer[index].position.z > -10000.0f) {
float diffuse = glm::dot(-*lightDir,depthbuffer[index].normal);
diffuse = diffuse>0?diffuse:0;
int x = depthbuffer[index].texcoord.x * tex->width;
int y = depthbuffer[index].texcoord.y * tex->height;
glm::vec3 tex_color0 = device_data[y * tex->height + x];
glm::vec3 tex_color1 = device_data[y * tex->height + x+1];
glm::vec3 tex_color2 = device_data[(y+1) * tex->height + x];
glm::vec3 tex_color3 = device_data[(y+1) * tex->height + x+1];
float xx = depthbuffer[index].texcoord.x * tex->width - x;
float yy = depthbuffer[index].texcoord.y * tex->height - y;
glm::vec3 tex_color = (tex_color0 * (1-xx) + tex_color1 * xx) * (1-yy) + (tex_color2 * (1-xx) + tex_color3 * xx) * yy;
depthbuffer[index].color = tex_color*diffuse*0.9f+tex_color*0.1f;
}
}
}
//Handy function for reflection
__host__ __device__ glm::vec3 reflect(glm::vec3 vec_in, glm::vec3 norm) {
return (vec_in - 2.0f*glm::dot(vec_in, norm)*norm);
}
//Phong shader
__global__ void fragmentShadePhongKernel(fragment* depthbuffer, glm::vec2 resolution, glm::vec3 lightpos){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
  if (x < resolution.x && y < resolution.y){
if (depthbuffer[index].position.z > -10000.0f) {
//Store the fragment info locally for accessibility
glm::vec3 V = depthbuffer[index].position;
glm::vec3 N = depthbuffer[index].normal;
//Compute necessary vectors
glm::vec3 L = glm::normalize(lightpos- V);
glm::vec3 E = glm::normalize(-V);
glm::vec3 R = glm::normalize(reflect(-L, N));
//Shininess
float specPow = 4.0f;
//Green (TODO: read from material)
glm::vec3 green(0.0f, 1.0f, 0.0f);
//Compute lighting
glm::vec3 ambient = 0.1f * green;
glm::vec3 diffuse = 0.45f * clamp(glm::dot(N, L), 0.0f, 1.0f) * green;
glm::vec3 specular = 0.45f * clamp(pow(max(glm::dot(R, E), 0.0f), specPow), 0.0f, 1.0f) * green;
depthbuffer[index].color = ambient + diffuse + specular;
}
}
}
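/*
 * Added note: the Phong shader above evaluates
 *   color = 0.1*C + 0.45*clamp(dot(N,L),0,1)*C + 0.45*clamp(max(dot(R,E),0)^specPow,0,1)*C
 * where C is the (currently hard-coded green) material color, N the fragment
 * normal, L the direction to the light, E the direction to the eye and R the
 * reflection of -L about N.
 */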
//Writes fragment colors to the framebuffer
__global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
  if(x<resolution.x && y<resolution.y){
framebuffer[index] = depthbuffer[index].color;
}
}
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
extern "C" void rasterizeMesh(uchar4* PBOpos, glm::vec2 resolution, glm::mat4 rotationM, float frame, float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, float* nbo, int nbosize, const bmp_texture *tex, std::vector<glm::vec4> *texcoord, glm::mat4 view, glm::vec3 lightpos, int mode, bool barycenter){
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
//set up framebuffer
framebuffer = NULL;
cudaMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3));
//set up depthbuffer
depthbuffer = NULL;
cudaMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment));
  //kernel launches to black out accumulated/unaccumulated pixel buffers and clear our scattering states
clearImage<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, framebuffer, glm::vec3(0,0,0));
fragment frag;
frag.color = glm::vec3(0,0,0);
frag.normal = glm::vec3(0,0,0);
frag.position = glm::vec3(0,0,-10000);
clearDepthBuffer<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer,frag);
//------------------------------
//memory stuff
//------------------------------
primitives = NULL;
cudaMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle));
primitives2 = NULL;
cudaMalloc((void**)&primitives2, (ibosize/3)*sizeof(triangle));
device_ibo = NULL;
cudaMalloc((void**)&device_ibo, ibosize*sizeof(int));
cudaMemcpy( device_ibo, ibo, ibosize*sizeof(int), cudaMemcpyHostToDevice);
device_vbo = NULL;
cudaMalloc((void**)&device_vbo, vbosize*sizeof(float));
cudaMemcpy( device_vbo, vbo, vbosize*sizeof(float), cudaMemcpyHostToDevice);
device_cbo = NULL;
cudaMalloc((void**)&device_cbo, cbosize*sizeof(float));
cudaMemcpy( device_cbo, cbo, cbosize*sizeof(float), cudaMemcpyHostToDevice);
device_nbo = NULL;
cudaMalloc((void**)&device_nbo, nbosize*sizeof(float));
cudaMemcpy( device_nbo, nbo, nbosize*sizeof(float), cudaMemcpyHostToDevice);
lightDir = glm::vec3(0,0,-1);
lightDir = glm::normalize(lightDir);
glm::vec3 *device_lightDir = NULL;
  cudaMalloc((void**)&device_lightDir, sizeof(glm::vec3));
  cudaMemcpy( device_lightDir, &lightDir, sizeof(glm::vec3), cudaMemcpyHostToDevice);
device_tex = NULL;
cudaMalloc((void**)&device_tex, sizeof(bmp_texture));
cudaMemcpy( device_tex, tex, sizeof(bmp_texture), cudaMemcpyHostToDevice);
glm::vec3 *device_data = NULL;
cudaMalloc((void**)&device_data, tex->width * tex->height *sizeof(glm::vec3));
cudaMemcpy( device_data, tex->data, tex->width * tex->height *sizeof(glm::vec3), cudaMemcpyHostToDevice);
tileSize = 32;
int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
glm::vec3 up(0, 1, 0);
float fovy = 60;
float zNear = 0.001;
float zFar = 10000;
glm::mat4 perspectiveM = glm::perspective(fovy, resolution.x/resolution.y, zNear, zFar);
//------------------------------
//vertex shader
//------------------------------
vertexShadeKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_nbo, nbosize, resolution, zNear, zFar, perspectiveM, view);
cudaDeviceSynchronize();
//------------------------------
//primitive assembly
//------------------------------
primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
primitiveAssemblyKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_cbo, cbosize, device_ibo, ibosize, device_nbo, nbosize, primitives);
cudaDeviceSynchronize();
int numOfPrimitives = ibosize/3 ;
//------------------------------
//culling
//------------------------------
culling(primitives, primitives2, numOfPrimitives);
primitiveBlocks = ceil(((float)numOfPrimitives) / ((float)tileSize));
triangle* temp = primitives;
primitives = primitives2;
primitives2 = temp;
//------------------------------
//rasterization
//------------------------------
if(SHOWBODY || mode==0){
rasterizationKernel<<<primitiveBlocks, tileSize>>>(primitives, numOfPrimitives, depthbuffer, resolution, zNear, zFar, barycenter);
cudaDeviceSynchronize();
}
if(SHOWLINES || mode==1){
linesRasterizeKernel<<<primitiveBlocks, tileSize>>>(primitives, numOfPrimitives, depthbuffer, resolution, zNear, zFar);
cudaDeviceSynchronize();
}
if(SHOWVERTICES || mode==2){
verticesRasterizeKernel<<<primitiveBlocks, tileSize>>>(primitives, numOfPrimitives, depthbuffer, resolution, zNear, zFar);
}
//------------------------------
//fragment shader
//------------------------------
fragmentShadeKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(depthbuffer, resolution, device_lightDir, device_tex, device_data);
//fragmentShadePhongKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(depthbuffer, resolution, lightpos);
cudaDeviceSynchronize();
//------------------------------
//write fragments to framebuffer
//------------------------------
render<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer, framebuffer);
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, resolution, framebuffer);
cudaDeviceSynchronize();
kernelCleanup();
checkCUDAError("Kernel failed!");
}
void kernelCleanup(){
cudaFree( primitives );
cudaFree( primitives2 );
cudaFree( device_vbo );
cudaFree( device_cbo );
cudaFree( device_ibo );
cudaFree( device_nbo );
cudaFree( framebuffer );
cudaFree( depthbuffer );
}
__global__ void writeColorToPBOKernel(const Color256* color_pixels, uchar4* pbo, const int num_pixels) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
//Don't do anything if the index is out of bounds
if (idx >= num_pixels) {
return;
}
//Grab the pixel value once from global memory
Color256 value = color_pixels[idx];
  // Each thread writes one pixel location in the texture (texel)
pbo[idx].w = 0;
pbo[idx].x = value.r;
pbo[idx].y = value.g;
pbo[idx].z = value.b;
}
extern "C" void writeColorToPBO(const Color256* color_pixels, uchar4* pbo, const int num_pixels) {
writeColorToPBOKernel<<<num_pixels / 256 + 1, 256>>>(color_pixels, pbo, num_pixels);
cudaDeviceSynchronize();
}
} // namespace rendering
} // namespace octree_slam
template <typename SUM_T>
__global__ void update_multi_node(
SUM_T *sum_dst, unsigned *count_dst, const SUM_T *hist_sum_parent,
const unsigned *hist_count_parent_count, const SUM_T *sum_src,
const unsigned *count_src, const unsigned *__restrict__ parent_count_iter,
const unsigned hist_size, const unsigned n) {
for (unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; i < n;
i += gridDim.x * blockDim.x) {
int parent_node_id = i / hist_size;
int position = i % hist_size;
int left_node_id = parent_node_id * 2;
int right_node_id = parent_node_id * 2 + 1;
unsigned left_size =
parent_count_iter[left_node_id + 1] - parent_count_iter[left_node_id];
unsigned right_size =
parent_count_iter[right_node_id + 1] - parent_count_iter[right_node_id];
int src_node = left_size <= right_size ? left_node_id : right_node_id;
int affected_node = src_node == left_node_id ? right_node_id : left_node_id;
sum_dst[affected_node * hist_size + position] =
hist_sum_parent[parent_node_id * hist_size + position] -
sum_src[src_node * hist_size + position];
count_dst[affected_node * hist_size + position] =
hist_count_parent_count[parent_node_id * hist_size + position] -
count_src[src_node * hist_size + position];
}
}
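// Added note: this kernel implements the histogram "subtraction trick". For a
// split parent node, only the smaller child's histogram is accumulated
// directly (see hist_sum_multi_node with USE_TRICK below); the larger child's
// per-bin sums and counts are reconstructed here as parent - smaller_child,
// which avoids re-scanning the larger child's rows.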
/*[[[cog
import cog
sum_types = ['float', 'double', 'float2', 'mydouble2']
cog.outl("// clang-format off")
for t in sum_types:
cog.outl("""template __global__ void update_multi_node<{0}>(
{0} *sum_dst, unsigned *count_dst, const {0} *hist_sum_parent,
const unsigned *hist_count_parent_count, const {0} *sum_src,
const unsigned *count_src, const unsigned *__restrict__ parent_count_iter,
const unsigned hist_size, const unsigned n);""".format(t))
cog.outl("// clang-format on")
]]]*/
// clang-format off
template __global__ void update_multi_node<float>(
float *sum_dst, unsigned *count_dst, const float *hist_sum_parent,
const unsigned *hist_count_parent_count, const float *sum_src,
const unsigned *count_src, const unsigned *__restrict__ parent_count_iter,
const unsigned hist_size, const unsigned n);
template __global__ void update_multi_node<double>(
double *sum_dst, unsigned *count_dst, const double *hist_sum_parent,
const unsigned *hist_count_parent_count, const double *sum_src,
const unsigned *count_src, const unsigned *__restrict__ parent_count_iter,
const unsigned hist_size, const unsigned n);
template __global__ void update_multi_node<float2>(
float2 *sum_dst, unsigned *count_dst, const float2 *hist_sum_parent,
const unsigned *hist_count_parent_count, const float2 *sum_src,
const unsigned *count_src, const unsigned *__restrict__ parent_count_iter,
const unsigned hist_size, const unsigned n);
template __global__ void update_multi_node<mydouble2>(
mydouble2 *sum_dst, unsigned *count_dst, const mydouble2 *hist_sum_parent,
const unsigned *hist_count_parent_count, const mydouble2 *sum_src,
const unsigned *count_src, const unsigned *__restrict__ parent_count_iter,
const unsigned hist_size, const unsigned n);
// clang-format on
//[[[end]]] (checksum: 885ef776f06fb9f79c9d2ee3dd93ff40)
template <typename SUM_T>
__global__ void update(SUM_T *sum_dst, unsigned *count_dst,
const SUM_T *parent_sum, const unsigned *parent_count,
const SUM_T *sum_src, const unsigned *count_src,
const unsigned n) {
for (unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; i < n;
i += gridDim.x * blockDim.x) {
sum_dst[i] = parent_sum[i] - sum_src[i];
count_dst[i] = parent_count[i] - count_src[i];
}
}
/*[[[cog
import cog
sum_types = ['float', 'double', 'float2', 'mydouble2']
cog.outl("// clang-format off")
for t in sum_types:
cog.outl("""template __global__ void update<{0}>({0} *sum_dst, unsigned
*count_dst, const {0} *parent_sum, const unsigned *parent_count, const {0}
*sum_src, const unsigned *count_src, const unsigned n);""".format(t))
cog.outl("// clang-format on")
]]]*/
// clang-format off
template __global__ void update<float>(float *sum_dst, unsigned
*count_dst, const float *parent_sum, const unsigned *parent_count, const float
*sum_src, const unsigned *count_src, const unsigned n);
template __global__ void update<double>(double *sum_dst, unsigned
*count_dst, const double *parent_sum, const unsigned *parent_count, const double
*sum_src, const unsigned *count_src, const unsigned n);
template __global__ void update<float2>(float2 *sum_dst, unsigned
*count_dst, const float2 *parent_sum, const unsigned *parent_count, const float2
*sum_src, const unsigned *count_src, const unsigned n);
template __global__ void update<mydouble2>(mydouble2 *sum_dst, unsigned
*count_dst, const mydouble2 *parent_sum, const unsigned *parent_count, const mydouble2
*sum_src, const unsigned *count_src, const unsigned n);
// clang-format on
//[[[end]]] (checksum: 3a2af9afc26c66fcda0fa6541b4a2370)
template <typename SUM_T, typename GRAD_T, typename BIN_TYPE,
int ITEMS_PER_THREAD>
__global__ void hist_sum_node(SUM_T *dst_sum, unsigned *dst_count,
const GRAD_T *__restrict__ values,
const BIN_TYPE *__restrict__ bin,
const unsigned end_bit, const unsigned segment,
const size_t n) {
constexpr BIN_TYPE NO_DATA = BIN_TYPE(-1);
const int warp_id = threadIdx.x / 32;
const int lane = threadIdx.x % 32;
typedef cub::BlockRadixSort<BIN_TYPE, HIST_SUM_BLOCK_DIM, ITEMS_PER_THREAD,
                              GRAD_T, 4, false, cub::BLOCK_SCAN_RAKING>
BlockRadixSort;
typedef cub::WarpScan<
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>>>
WarpSum;
__shared__ typename WarpSum::TempStorage temp_scan[HIST_SUM_BLOCK_DIM / 32];
__shared__ typename BlockRadixSort::TempStorage temp_sort;
// Obtain a segment of consecutive items that are blocked across threads
BIN_TYPE thread_keys[ITEMS_PER_THREAD];
GRAD_T thread_values[ITEMS_PER_THREAD];
#pragma unroll
for (unsigned i = 0; i < ITEMS_PER_THREAD; ++i) {
unsigned idx =
blockDim.x * blockIdx.x * ITEMS_PER_THREAD + i * blockDim.x + threadIdx.x;
if (idx < n) {
thread_keys[i] = bin[idx];
thread_values[i] = values[idx];
} else {
thread_keys[i] = NO_DATA;
}
}
// Collectively sort the keys and values among block threads
BlockRadixSort(temp_sort).Sort(thread_keys, thread_values, 0, end_bit);
SUM_T sum_current = thread_values[0];
unsigned short count_current = 1;
#pragma unroll
for (unsigned i = 1; i < ITEMS_PER_THREAD; ++i) {
if (thread_keys[i - 1] == thread_keys[i]) {
sum_current += thread_values[i];
count_current++;
} else {
atomicAdd(&dst_sum[thread_keys[i - 1]], sum_current);
atomicAdd(&dst_count[thread_keys[i - 1]], count_current);
sum_current = thread_values[i];
count_current = 1;
}
}
SUM_T zero;
init(zero);
struct SegmentSum {
__device__ __forceinline__
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>>
operator()(
const cub::KeyValuePair<BIN_TYPE,
cub::KeyValuePair<unsigned short, SUM_T>> &a,
const cub::KeyValuePair<
BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>> &b) const {
if (b.key > a.key) return b;
cub::KeyValuePair<unsigned short, SUM_T> sum(
a.value.key + b.value.key, a.value.value + b.value.value);
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>> v(
a.key, sum);
return v;
}
};
cub::KeyValuePair<unsigned short, SUM_T> initial_sum(count_current,
sum_current);
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>> initial(
thread_keys[ITEMS_PER_THREAD - 1], initial_sum);
cub::KeyValuePair<unsigned short, SUM_T> zero_sum_(0, zero);
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>> zero_(
0, zero_sum_);
WarpSum(temp_scan[warp_id])
.ExclusiveScan(initial, initial, zero_, SegmentSum());
// flush previous segment
if (thread_keys[ITEMS_PER_THREAD - 1] != initial.key) {
atomicAdd(&dst_sum[initial.key], initial.value.value);
atomicAdd(&dst_count[initial.key], initial.value.key);
}
  // the last thread also needs to handle its own sum
if (lane == 31 && thread_keys[ITEMS_PER_THREAD - 1] != NO_DATA) {
// flush all collected data
if (thread_keys[ITEMS_PER_THREAD - 1] == initial.key) {
atomicAdd(&dst_sum[thread_keys[ITEMS_PER_THREAD - 1]],
sum_current + initial.value.value);
atomicAdd(&dst_count[thread_keys[ITEMS_PER_THREAD - 1]],
count_current + initial.value.key);
} else { // only thread local sum
atomicAdd(&dst_sum[thread_keys[ITEMS_PER_THREAD - 1]], sum_current);
atomicAdd(&dst_count[thread_keys[ITEMS_PER_THREAD - 1]], count_current);
}
}
}
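// Added summary (hedged) of hist_sum_node above: each block (1) loads up to
// ITEMS_PER_THREAD (bin, gradient) pairs per thread, padding missing items
// with NO_DATA, (2) sorts them by bin with cub::BlockRadixSort so equal bins
// become contiguous within a thread, (3) accumulates runs of equal bins
// locally and flushes completed runs with atomicAdd, and (4) combines each
// thread's trailing partial run with those of lower lanes via a cub::WarpScan
// exclusive scan with a segmented-sum operator before the final atomic flush.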
// clang-format off
/*[[[cog
import cog
for t in [('float', 'float'), ('float', 'double'), ('float2', 'float2'), ('float2', 'mydouble2')]:
for bin_type in ['unsigned short', 'unsigned char']:
cog.outl("""template __global__ void hist_sum_node<{0}, {1}, {2}>(
{0} *dst_sum, unsigned *dst_count, const {1} *__restrict__ values,
const {2} *__restrict__ bin, const unsigned end_bit,
const unsigned segment, const size_t n);""".format(
t[1], t[0], bin_type))
]]]*/
template __global__ void hist_sum_node<float, float, unsigned short>(
float *dst_sum, unsigned *dst_count, const float *__restrict__ values,
const unsigned short *__restrict__ bin, const unsigned end_bit,
const unsigned segment, const size_t n);
template __global__ void hist_sum_node<float, float, unsigned char>(
float *dst_sum, unsigned *dst_count, const float *__restrict__ values,
const unsigned char *__restrict__ bin, const unsigned end_bit,
const unsigned segment, const size_t n);
template __global__ void hist_sum_node<double, float, unsigned short>(
double *dst_sum, unsigned *dst_count, const float *__restrict__ values,
const unsigned short *__restrict__ bin, const unsigned end_bit,
const unsigned segment, const size_t n);
template __global__ void hist_sum_node<double, float, unsigned char>(
double *dst_sum, unsigned *dst_count, const float *__restrict__ values,
const unsigned char *__restrict__ bin, const unsigned end_bit,
const unsigned segment, const size_t n);
template __global__ void hist_sum_node<float2, float2, unsigned short>(
float2 *dst_sum, unsigned *dst_count, const float2 *__restrict__ values,
const unsigned short *__restrict__ bin, const unsigned end_bit,
const unsigned segment, const size_t n);
template __global__ void hist_sum_node<float2, float2, unsigned char>(
float2 *dst_sum, unsigned *dst_count, const float2 *__restrict__ values,
const unsigned char *__restrict__ bin, const unsigned end_bit,
const unsigned segment, const size_t n);
template __global__ void hist_sum_node<mydouble2, float2, unsigned short>(
mydouble2 *dst_sum, unsigned *dst_count, const float2 *__restrict__ values,
const unsigned short *__restrict__ bin, const unsigned end_bit,
const unsigned segment, const size_t n);
template __global__ void hist_sum_node<mydouble2, float2, unsigned char>(
mydouble2 *dst_sum, unsigned *dst_count, const float2 *__restrict__ values,
const unsigned char *__restrict__ bin, const unsigned end_bit,
const unsigned segment, const size_t n);
//[[[end]]] (checksum: 6858952bec46e367f32f00e13445c7b9)
// clang-format on
template <typename SUM_T, typename GRAD_T, typename BIN_TYPE, bool USE_TRICK,
int ITEMS_PER_THREAD>
__global__ void hist_sum_multi_node(
SUM_T *dst_sum, unsigned *dst_count, const SUM_T *hist_sum_parent,
const unsigned *hist_count_parent, const GRAD_T *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const BIN_TYPE *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node) {
constexpr BIN_TYPE NO_DATA = BIN_TYPE(-1);
const int warp_id = threadIdx.x / 32;
const int lane = threadIdx.x % 32;
const int blocks_per_node_size = (1 << blocks_per_node);
int node_id = blockIdx.x >> blocks_per_node;
unsigned node_start;
unsigned node_size;
if (USE_TRICK) {
int node_left = node_id * 2;
int node_right = node_id * 2 + 1;
const unsigned x = parent_count_iter[node_left];
const unsigned y = parent_count_iter[node_right];
const unsigned z = parent_count_iter[node_right + 1];
const unsigned left_size = y - x;
const unsigned right_size = z - y;
node_id = left_size > right_size ? node_right : node_left;
node_start = left_size > right_size ? y : x;
node_size = left_size > right_size ? right_size : left_size;
} else {
node_start = parent_count_iter[node_id];
node_size = parent_count_iter[node_id + 1] - node_start;
}
typedef cub::BlockRadixSort<BIN_TYPE, HIST_SUM_BLOCK_DIM, ITEMS_PER_THREAD,
                              GRAD_T, 4, false, cub::BLOCK_SCAN_RAKING>
BlockRadixSort;
typedef cub::WarpScan<
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>>>
WarpSum;
__shared__ typename WarpSum::TempStorage temp_scan[HIST_SUM_BLOCK_DIM / 32];
__shared__ typename BlockRadixSort::TempStorage temp_sort;
// Obtain a segment of consecutive items that are blocked across threads
BIN_TYPE thread_keys[ITEMS_PER_THREAD];
GRAD_T thread_values[ITEMS_PER_THREAD];
for (int block_offset =
blockIdx.x - (blockIdx.x >> blocks_per_node) * blocks_per_node_size;
block_offset * ITEMS_PER_THREAD * HIST_SUM_BLOCK_DIM < node_size;
block_offset += blocks_per_node_size) {
#pragma unroll
for (unsigned i = 0; i < ITEMS_PER_THREAD; ++i) {
unsigned idx = block_offset * ITEMS_PER_THREAD * HIST_SUM_BLOCK_DIM +
i * HIST_SUM_BLOCK_DIM + threadIdx.x;
if (idx < node_size) {
thread_keys[i] = bin[idx + node_start];
thread_values[i] = values[idx + node_start];
} else {
thread_keys[i] = NO_DATA;
}
}
// Collectively sort the keys and values among block threads
BlockRadixSort(temp_sort).Sort(thread_keys, thread_values, 0, end_bit);
SUM_T sum_current = thread_values[0];
unsigned short count_current = 1;
#pragma unroll
for (unsigned i = 1; i < ITEMS_PER_THREAD; ++i) {
if (thread_keys[i - 1] == thread_keys[i]) {
sum_current += thread_values[i];
count_current++;
} else {
atomicAdd(&dst_sum[thread_keys[i - 1] + node_id * hist_size],
sum_current);
atomicAdd(&dst_count[thread_keys[i - 1] + node_id * hist_size],
count_current);
sum_current = thread_values[i];
count_current = 1;
}
}
SUM_T zero;
init(zero);
struct SegmentSum {
__device__ __forceinline__
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>>
operator()(
const cub::KeyValuePair<BIN_TYPE,
cub::KeyValuePair<unsigned short, SUM_T>> &a,
const cub::KeyValuePair<
BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>> &b) const {
if (b.key > a.key) return b;
cub::KeyValuePair<unsigned short, SUM_T> sum(
a.value.key + b.value.key, a.value.value + b.value.value);
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>> v(
a.key, sum);
return v;
}
};
cub::KeyValuePair<unsigned short, SUM_T> initial_sum(count_current,
sum_current);
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>>
initial(thread_keys[ITEMS_PER_THREAD - 1], initial_sum);
cub::KeyValuePair<unsigned short, SUM_T> zero_sum_(0, zero);
cub::KeyValuePair<BIN_TYPE, cub::KeyValuePair<unsigned short, SUM_T>> zero_(
0, zero_sum_);
WarpSum(temp_scan[warp_id])
.ExclusiveScan(initial, initial, zero_, SegmentSum());
// flush previous segment
if (thread_keys[ITEMS_PER_THREAD - 1] != initial.key) {
atomicAdd(&dst_sum[initial.key + node_id * hist_size],
initial.value.value);
atomicAdd(&dst_count[initial.key + node_id * hist_size],
initial.value.key);
}
    // the last thread also needs to handle its own sum
if (lane == 31 && thread_keys[ITEMS_PER_THREAD - 1] != NO_DATA) {
// flush all collected data
if (thread_keys[ITEMS_PER_THREAD - 1] == initial.key) {
atomicAdd(
&dst_sum[thread_keys[ITEMS_PER_THREAD - 1] + node_id * hist_size],
sum_current + initial.value.value);
atomicAdd(
&dst_count[thread_keys[ITEMS_PER_THREAD - 1] + node_id * hist_size],
count_current + initial.value.key);
} else { // only thread local sum
atomicAdd(
&dst_sum[thread_keys[ITEMS_PER_THREAD - 1] + node_id * hist_size],
sum_current);
atomicAdd(
&dst_count[thread_keys[ITEMS_PER_THREAD - 1] + node_id * hist_size],
count_current);
}
}
}
}
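// Added note (hedged): hist_sum_multi_node follows the same sort-and-segment
// scheme as hist_sum_node, but maps blocks to tree nodes via
// blockIdx.x >> blocks_per_node and strides over that node's row range. With
// USE_TRICK it only accumulates the smaller of the two children of each
// parent; the sibling's histogram is then reconstructed by update_multi_node.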
// clang-format off
/*[[[cog
import cog
for t in [('float', 'float'), ('float', 'double'), ('float2', 'float2'), ('float2', 'mydouble2')]:
for bin_type in ['unsigned short', 'unsigned char']:
cog.outl("""template __global__ void
hist_sum_multi_node<{0}, {1}, {2}, false>(
{0} *dst_sum, unsigned *dst_count, const {0} *hist_sum_parent,
const unsigned *hist_count_parent, const {1} *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const {2} *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);""".format(
t[1], t[0], bin_type))
cog.outl("""template __global__ void
hist_sum_multi_node<{0}, {1}, {2}, true>(
{0} *dst_sum, unsigned *dst_count, const {0} *hist_sum_parent,
const unsigned *hist_count_parent, const {1} *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const {2} *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);""".format(
t[1], t[0], bin_type))
]]]*/
template __global__ void
hist_sum_multi_node<float, float, unsigned short, false>(
float *dst_sum, unsigned *dst_count, const float *hist_sum_parent,
const unsigned *hist_count_parent, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<float, float, unsigned short, true>(
float *dst_sum, unsigned *dst_count, const float *hist_sum_parent,
const unsigned *hist_count_parent, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<float, float, unsigned char, false>(
float *dst_sum, unsigned *dst_count, const float *hist_sum_parent,
const unsigned *hist_count_parent, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<float, float, unsigned char, true>(
float *dst_sum, unsigned *dst_count, const float *hist_sum_parent,
const unsigned *hist_count_parent, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<double, float, unsigned short, false>(
double *dst_sum, unsigned *dst_count, const double *hist_sum_parent,
const unsigned *hist_count_parent, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<double, float, unsigned short, true>(
double *dst_sum, unsigned *dst_count, const double *hist_sum_parent,
const unsigned *hist_count_parent, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<double, float, unsigned char, false>(
double *dst_sum, unsigned *dst_count, const double *hist_sum_parent,
const unsigned *hist_count_parent, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<double, float, unsigned char, true>(
double *dst_sum, unsigned *dst_count, const double *hist_sum_parent,
const unsigned *hist_count_parent, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<float2, float2, unsigned short, false>(
float2 *dst_sum, unsigned *dst_count, const float2 *hist_sum_parent,
const unsigned *hist_count_parent, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<float2, float2, unsigned short, true>(
float2 *dst_sum, unsigned *dst_count, const float2 *hist_sum_parent,
const unsigned *hist_count_parent, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<float2, float2, unsigned char, false>(
float2 *dst_sum, unsigned *dst_count, const float2 *hist_sum_parent,
const unsigned *hist_count_parent, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<float2, float2, unsigned char, true>(
float2 *dst_sum, unsigned *dst_count, const float2 *hist_sum_parent,
const unsigned *hist_count_parent, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<mydouble2, float2, unsigned short, false>(
mydouble2 *dst_sum, unsigned *dst_count, const mydouble2 *hist_sum_parent,
const unsigned *hist_count_parent, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<mydouble2, float2, unsigned short, true>(
mydouble2 *dst_sum, unsigned *dst_count, const mydouble2 *hist_sum_parent,
const unsigned *hist_count_parent, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<mydouble2, float2, unsigned char, false>(
mydouble2 *dst_sum, unsigned *dst_count, const mydouble2 *hist_sum_parent,
const unsigned *hist_count_parent, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
template __global__ void
hist_sum_multi_node<mydouble2, float2, unsigned char, true>(
mydouble2 *dst_sum, unsigned *dst_count, const mydouble2 *hist_sum_parent,
const unsigned *hist_count_parent, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned hist_size,
const unsigned end_bit, const int blocks_per_node);
//[[[end]]] (checksum: 25c0c64a105cac1fa2bd85171d8543e1)
// clang-format on
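// hist_sum: simple fallback kernel. Each element i is processed by a
// grid-stride loop: its segment (node) is found by a linear scan over the
// prefix sums in parent_count_iter, and its gradient value plus a count of one
// are atomically accumulated into the (segment, bin) cell of the histogram.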
template <typename SUM_T, typename GRAD_T, typename BIN_T>
__global__ void hist_sum(SUM_T *dst_sum, unsigned *dst_count,
const GRAD_T *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const BIN_T *__restrict__ bin, const unsigned bins,
const size_t n) {
for (unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; i < n;
i += gridDim.x * blockDim.x) {
    // TODO: Binary search? (see the sketch after this kernel)
unsigned segment = 0;
    while (i >= parent_count_iter[segment + 1]) {
segment++;
}
unsigned idx = segment * bins + bin[i];
SUM_T val = values[i];
atomicAdd(&dst_sum[idx], val);
atomicAdd(&dst_count[idx], 1);
}
}
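// A hedged sketch (not part of the original source) of the "binary search"
// idea noted in the TODO above: an upper-bound search over the prefix sums,
// assuming parent_count_iter holds n_segments + 1 non-decreasing entries and
// segment s covers the index range [parent_count_iter[s], parent_count_iter[s+1]).
// The helper name is hypothetical.
__device__ __forceinline__ unsigned hist_sum_find_segment(
    const unsigned *__restrict__ parent_count_iter, const unsigned n_segments,
    const unsigned i) {
  unsigned lo = 0, hi = n_segments;  // answer lies in [lo, hi)
  while (lo < hi) {
    const unsigned mid = lo + (hi - lo) / 2;
    if (i >= parent_count_iter[mid + 1])
      lo = mid + 1;  // i lies past the end of segment mid
    else
      hi = mid;      // segment mid (or an earlier one) contains i
  }
  return lo;
}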
// clang-format off
/*[[[cog
import cog
for t in [('float', 'float'), ('float', 'double'), ('float2', 'float2'), ('float2', 'mydouble2')]:
for bin_type in ['unsigned short', 'unsigned char']:
cog.outl("""template __global__ void hist_sum<{0}, {1}, {2}>(
{0} *dst_sum, unsigned *dst_count, const {1} *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const {2} *__restrict__ bin, const unsigned bins, const size_t n);""".format(
t[1], t[0], bin_type))
]]]*/
template __global__ void hist_sum<float, float, unsigned short>(
float *dst_sum, unsigned *dst_count, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned bins, const size_t n);
template __global__ void hist_sum<float, float, unsigned char>(
float *dst_sum, unsigned *dst_count, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned bins, const size_t n);
template __global__ void hist_sum<double, float, unsigned short>(
double *dst_sum, unsigned *dst_count, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned bins, const size_t n);
template __global__ void hist_sum<double, float, unsigned char>(
double *dst_sum, unsigned *dst_count, const float *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned bins, const size_t n);
template __global__ void hist_sum<float2, float2, unsigned short>(
float2 *dst_sum, unsigned *dst_count, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned bins, const size_t n);
template __global__ void hist_sum<float2, float2, unsigned char>(
float2 *dst_sum, unsigned *dst_count, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned bins, const size_t n);
template __global__ void hist_sum<mydouble2, float2, unsigned short>(
mydouble2 *dst_sum, unsigned *dst_count, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned short *__restrict__ bin, const unsigned bins, const size_t n);
template __global__ void hist_sum<mydouble2, float2, unsigned char>(
mydouble2 *dst_sum, unsigned *dst_count, const float2 *__restrict__ values,
const unsigned *__restrict__ parent_count_iter,
const unsigned char *__restrict__ bin, const unsigned bins, const size_t n);
//[[[end]]] (checksum: ccde6b65791a97b2a0a90e1c9694abda)
// clang-format on
|
the_stack
|
extern "C" {
#include "sph/sph_blake.h"
}
#include "cuda_helper.h"
#include "miner.h"
#include <memory.h>
#define TPB 768
static const uint32_t c_IV256[8] = {
0x6A09E667, 0xBB67AE85,
0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C,
0x1F83D9AB, 0x5BE0CD19
};
__device__ __constant__ uint32_t _ALIGN(16) c_h[ 8];
__device__ __constant__ uint32_t _ALIGN(16) c_v[16];
__device__ __constant__ uint32_t _ALIGN(16) c_m[16];
__device__ __constant__ uint32_t _ALIGN(16) c_x[60];
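// GSn is the BLAKE-256 G (quarter-round) function with rotation amounts
// 16, 12, 8, 7; ROL16/ROR8 are the byte-shuffle equivalents of ROTR32(x,16)
// and ROTR32(x,8). The x and y inputs are already the (message word XOR
// sigma-selected constant) terms. GSn4 and GSn3 apply G to four and three
// independent columns at once to expose more instruction-level parallelism.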
#define GSn(a,b,c,d,x,y) { \
v[a]+= x + v[b]; \
v[d] = ROL16(v[d] ^ v[a]); \
v[c]+= v[d]; \
v[b] = ROTR32(v[b] ^ v[c], 12); \
v[a]+= y + v[b]; \
v[d] = ROR8(v[d] ^ v[a]); \
v[c]+= v[d]; \
v[b] = ROTR32(v[b] ^ v[c], 7); \
}
#define GSn4(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1,a2,b2,c2,d2,x2,y2,a3,b3,c3,d3,x3,y3) { \
v[ a]+= x + v[ b]; v[a1]+= x1 + v[b1]; v[a2]+= x2 + v[b2]; v[a3]+= x3 + v[b3]; \
v[ d] = ROL16(v[ d] ^ v[ a]); v[d1] = ROL16(v[d1] ^ v[a1]); v[d2] = ROL16(v[d2] ^ v[a2]); v[d3] = ROL16(v[d3] ^ v[a3]); \
v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2]; v[c3]+= v[d3]; \
v[ b] = ROTR32(v[ b] ^ v[ c], 12); v[b1] = ROTR32(v[b1] ^ v[c1], 12); v[b2] = ROTR32(v[b2] ^ v[c2], 12); v[b3] = ROTR32(v[b3] ^ v[c3], 12); \
v[ a]+= y + v[ b]; v[a1]+= y1 + v[b1]; v[a2]+= y2 + v[b2]; v[a3]+= y3 + v[b3]; \
v[ d] = ROR8(v[ d] ^ v[ a]); v[d1] = ROR8(v[d1] ^ v[a1]); v[d2] = ROR8(v[d2] ^ v[a2]); v[d3] = ROR8(v[d3] ^ v[a3]); \
v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2]; v[c3]+= v[d3]; \
v[ b] = ROTR32(v[ b] ^ v[ c], 7); v[b1] = ROTR32(v[b1] ^ v[c1], 7); v[b2] = ROTR32(v[b2] ^ v[c2], 7); v[b3] = ROTR32(v[b3] ^ v[c3], 7); \
}
#define GSn3(a,b,c,d,x,y,a1,b1,c1,d1,x1,y1,a2,b2,c2,d2,x2,y2) { \
v[ a]+= x + v[ b]; v[a1]+= x1 + v[b1]; v[a2]+= x2 + v[b2];\
v[ d] = ROL16(v[ d] ^ v[ a]); v[d1] = ROL16(v[d1] ^ v[a1]); v[d2] = ROL16(v[d2] ^ v[a2]);\
v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2];\
v[ b] = ROTR32(v[ b] ^ v[ c], 12); v[b1] = ROTR32(v[b1] ^ v[c1], 12); v[b2] = ROTR32(v[b2] ^ v[c2], 12);\
v[ a]+= y + v[ b]; v[a1]+= y1 + v[b1]; v[a2]+= y2 + v[b2];\
v[ d] = ROR8(v[ d] ^ v[ a]); v[d1] = ROR8(v[d1] ^ v[a1]); v[d2] = ROR8(v[d2] ^ v[a2]);\
v[ c]+= v[ d]; v[c1]+= v[d1]; v[c2]+= v[d2];\
v[ b] = ROTR32(v[ b] ^ v[ c], 7); v[b1] = ROTR32(v[b1] ^ v[c1], 7); v[b2] = ROTR32(v[b2] ^ v[c2], 7);\
}
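// Host-side G variants used for precomputation: hostGS looks up the message
// permutation c_sigma[r] for the current round, while hostGSn takes the
// already-permuted message/constant indices directly.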
#define hostGS(a,b,c,d,x) { \
const uint32_t idx1 = c_sigma[r][x]; \
const uint32_t idx2 = c_sigma[r][x+1]; \
v[a] += (m[idx1] ^ z[idx2]) + v[b]; \
v[d] = ROTR32(v[d] ^ v[a], 16); \
v[c] += v[d]; \
v[b] = ROTR32(v[b] ^ v[c], 12); \
\
v[a] += (m[idx2] ^ z[idx1]) + v[b]; \
v[d] = ROTR32(v[d] ^ v[a], 8); \
v[c] += v[d]; \
v[b] = ROTR32(v[b] ^ v[c], 7); \
}
#define hostGSn(a,b,c,d,x,y) { \
v[a] += (m[x] ^ z[y]) + v[b]; \
v[d] = ROTR32(v[d] ^ v[a], 16); \
v[c] += v[d]; \
v[b] = ROTR32(v[b] ^ v[c], 12); \
v[a] += (m[y] ^ z[x]) + v[b]; \
v[d] = ROTR32(v[d] ^ v[a], 8); \
v[c] += v[d]; \
v[b] = ROTR32(v[b] ^ v[c], 7); \
}
__host__ __forceinline__
static void blake256_14round_compress1st(uint32_t *h, const uint32_t *block, const uint32_t T0){
uint32_t m[16];
uint32_t v[16];
const uint32_t c_sigma[16][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }
};
const uint32_t z[16] = {
0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344, 0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C, 0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917
};
for (int i = 0; i < 16; i++) {
m[i] = block[i];
}
for (int i = 0; i < 8; i++)
v[i] = h[i];
v[8] = z[0];
v[9] = z[1];
v[10] = z[2];
v[11] = z[3];
v[12] = z[4] ^ T0;
v[13] = z[5] ^ T0;
v[14] = z[6];
v[15] = z[7];
for (int r = 0; r < 14; r++) {
/* column step */
hostGS(0, 4, 0x8, 0xC, 0x0);
hostGS(1, 5, 0x9, 0xD, 0x2);
hostGS(2, 6, 0xA, 0xE, 0x4);
hostGS(3, 7, 0xB, 0xF, 0x6);
/* diagonal step */
hostGS(0, 5, 0xA, 0xF, 0x8);
hostGS(1, 6, 0xB, 0xC, 0xA);
hostGS(2, 7, 0x8, 0xD, 0xC);
hostGS(3, 4, 0x9, 0xE, 0xE);
}
for (int i = 0; i < 16; i++) {
int j = i & 7;
h[j] ^= v[i];
}
}
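// Search kernel: the first 64-byte block of the 80-byte header is compressed
// on the host (see blake256_14round_cpu_setBlock_80 below) and cached in
// constant memory (c_h, c_v, c_x). Each thread finishes the second compression
// with its own nonce, which enters the message as word 3, and stores the
// 32-byte hash as four uint2 words strided by `threads`. After round 9 the
// sigma permutation repeats, so c_x is re-read from index 0 for rounds 10-13.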
__global__
void blake256_14round_gpu_hash_80(const uint32_t threads, const uint32_t startNonce, uint2 * Hash){
const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t z[16] = {
0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917
};
uint32_t v[16];
uint32_t xors[16];
if(thread<threads){
volatile uint32_t nonce = startNonce+thread;
#pragma unroll
for(int i=0;i<16;i++){
v[i] = c_v[i];
}
int i=0;
//partial: 0{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = z[ 7];
xors[ 4] = c_x[i++]; xors[ 5] = z[ 2]^nonce; xors[ 6] = z[ 4]; xors[ 7] = z[ 6];
xors[ 8] = z[ 9]; xors[ 9] = z[11]; xors[10] = z[13]; xors[11] = z[15];
xors[12] = z[ 8]; xors[13] = z[10]; xors[14] = c_x[i++]; xors[15] = c_x[i++];
v[ 1]+= xors[ 5]; v[13] = ROR8(v[13] ^ v[1]);
v[ 9]+= v[13]; v[ 5] = ROTR32(v[5] ^ v[9], 7);
v[ 0]+= v[5]; v[15] = ROL16(v[15] ^ v[0]);
v[10]+= v[15]; v[ 5] = ROTR32(v[5] ^ v[10], 12);
v[ 0]+= xors[12] + v[5]; v[15] = ROR8(v[15] ^ v[0]);
v[10]+= v[15]; v[ 5] = ROTR32(v[5] ^ v[10], 7);
// GSn3(1, 6,11,12,xors[ 9],xors[13], 2, 7, 8,13,xors[10],xors[14], 3, 4, 9,14,xors[11],xors[15]);
v[ 1]+= xors[ 9] + v[ 6];
v[12] = ROL16(v[12] ^ v[ 1]); v[13] = ROL16(v[13] ^ v[ 2]);
v[11]+= v[12]; v[ 8]+= v[13]; v[ 9]+= v[14];
v[ 6] = ROTR32(v[ 6] ^ v[11], 12); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 12); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 12);
v[ 1]+= xors[13] + v[ 6]; v[ 2]+= xors[14] + v[ 7]; v[ 3]+= xors[15] + v[ 4];
v[12] = ROR8(v[12] ^ v[ 1]); v[13] = ROR8(v[13] ^ v[ 2]); v[14] = ROR8(v[14] ^ v[ 3]);
v[11]+= v[12]; v[ 8]+= v[13]; v[ 9]+= v[14];
v[ 6] = ROTR32(v[ 6] ^ v[11], 7); v[ 7] = ROTR32(v[ 7] ^ v[ 8], 7); v[ 4] = ROTR32(v[ 4] ^ v[ 9], 7);
// 1{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
xors[ 0] = z[10]; xors[ 1] = c_x[i++]; xors[ 2] = z[15]; xors[ 3] = c_x[i++];
xors[ 4] = z[14]; xors[ 5] = z[ 4]; xors[ 6] = c_x[i++]; xors[ 7] = z[13];
xors[ 8] = c_x[i++]; xors[ 9] = c_x[i++]; xors[10] = z[ 7]; xors[11] = z[ 3];
xors[12] = z[ 1]; xors[13] = c_x[i++]; xors[14] = z[11]; xors[15] = z[ 5]^nonce;
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 2{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }
xors[ 0] = z[ 8]; xors[ 1] = z[ 0]; xors[ 2] = z[ 2]; xors[ 3] = c_x[i++];
xors[ 4] = z[11]; xors[ 5] = c_x[i++]; xors[ 6] = c_x[i++]; xors[ 7] = c_x[i++];
xors[ 8] = z[14]; xors[ 9] = nonce^z[ 6]; xors[10] = z[ 1]; xors[11] = z[ 4];
xors[12] = z[10]; xors[13] = z[ 3]; xors[14] = c_x[i++]; xors[15] = c_x[i++];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 3{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }
xors[ 0] = z[ 9]; xors[ 1] = nonce^z[ 1]; xors[ 2] = c_x[i++]; xors[ 3] = z[14];
xors[ 4] = z[ 7]; xors[ 5] = c_x[i++]; xors[ 6] = z[13]; xors[ 7] = z[11];
xors[ 8] = c_x[i++]; xors[ 9] = z[10]; xors[10] = c_x[i++]; xors[11] = c_x[i++];
xors[12] = z[ 2]; xors[13] = z[ 5]; xors[14] = c_x[i++]; xors[15] = z[15];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 4{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }
xors[ 0] = z[ 0]; xors[ 1] = z[ 7]; xors[ 2] = c_x[i++]; xors[ 3] = z[15];
xors[ 4] = c_x[i++]; xors[ 5] = z[ 5]; xors[ 6] = c_x[i++]; xors[ 7] = c_x[i++];
xors[ 8] = z[ 1]; xors[ 9] = z[12]; xors[10] = z[ 8]; xors[11] = nonce^z[13];
xors[12] = c_x[i++]; xors[13] = z[11]; xors[14] = z[ 6]; xors[15] = c_x[i++];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 5{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }
xors[ 0] = c_x[i++]; xors[ 1] = z[10]; xors[ 2] = c_x[i++]; xors[ 3] = z[ 3];
xors[ 4] = z[ 2]; xors[ 5] = z[ 6]; xors[ 6] = z[ 0]; xors[ 7] = z[ 8]^nonce;
xors[ 8] = c_x[i++]; xors[ 9] = z[ 5]; xors[10] = c_x[i++]; xors[11] = c_x[i++];
xors[12] = c_x[i++]; xors[13] = z[ 7]; xors[14] = z[15]; xors[15] = z[ 1];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 6{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }
xors[ 0] = z[ 5]; xors[ 1] = c_x[i++]; xors[ 2] = z[13]; xors[ 3] = c_x[i++];
xors[ 4] = z[12]; xors[ 5] = c_x[i++]; xors[ 6] = c_x[i++]; xors[ 7] = z[ 4];
xors[ 8] = c_x[i++]; xors[ 9] = z[ 3]; xors[10] = z[ 2]; xors[11] = z[11];
xors[12] = z[ 0]; xors[13] = z[ 6]^nonce; xors[14] = c_x[i++]; xors[15] = z[ 8];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 7{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }
xors[ 0] = c_x[i++]; xors[ 1] = z[14]; xors[ 2] = z[ 1]; xors[ 3] = nonce^z[ 9];
xors[ 4] = z[13]; xors[ 5] = z[ 7]; xors[ 6] = c_x[i++]; xors[ 7] = z[ 3];
xors[ 8] = z[ 0]; xors[ 9] = c_x[i++]; xors[10] = z[ 6]; xors[11] = c_x[i++];
xors[12] = c_x[i++]; xors[13] = c_x[i++]; xors[14] = z[ 8]; xors[15] = z[ 2];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 8{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }
xors[ 0] = z[15]; xors[ 1] = z[ 9]; xors[ 2] = z[ 3]; xors[ 3] = c_x[i++];
xors[ 4] = c_x[i++]; xors[ 5] = z[14]; xors[ 6] = z[11]^nonce; xors[ 7] = z[ 0];
xors[ 8] = z[ 2]; xors[ 9] = c_x[i++]; xors[10] = c_x[i++]; xors[11] = z[ 5];
xors[12] = c_x[i++]; xors[13] = z[13]; xors[14] = c_x[i++]; xors[15] = z[10];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
		// 9{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }
xors[ 0] = z[ 2]; xors[ 1] = z[ 4]; xors[ 2] = z[ 6]; xors[ 3] = c_x[i++];
xors[ 4] = c_x[i++]; xors[ 5] = c_x[i++]; xors[ 6] = z[ 7]; xors[ 7] = z[ 1];
xors[ 8] = c_x[i++]; xors[ 9] = z[14]; xors[10] = nonce^z[12]; xors[11] = c_x[i++];
xors[12] = z[15]; xors[13] = z[ 9]; xors[14] = z[ 3]; xors[15] = c_x[i++];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
i=0;
// 0{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }
xors[ 0] = c_x[i++]; xors[ 1] = c_x[i++]; xors[ 2] = c_x[i++]; xors[ 3] = z[ 7];
xors[ 4] = c_x[i++]; xors[ 5] = z[ 2]^nonce; xors[ 6] = z[ 4]; xors[ 7] = z[ 6];
xors[ 8] = z[ 9]; xors[ 9] = z[11]; xors[10] = z[13]; xors[11] = z[15];
xors[12] = z[ 8]; xors[13] = z[10]; xors[14] = c_x[i++]; xors[15] = c_x[i++];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 1{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
xors[ 0] = z[10]; xors[ 1] = c_x[i++]; xors[ 2] = z[15]; xors[ 3] = c_x[i++];
xors[ 4] = z[14]; xors[ 5] = z[ 4]; xors[ 6] = c_x[i++]; xors[ 7] = z[13];
xors[ 8] = c_x[i++]; xors[ 9] = c_x[i++]; xors[10] = z[ 7]; xors[11] = z[ 3];
xors[12] = z[ 1]; xors[13] = c_x[i++]; xors[14] = z[11]; xors[15] = z[ 5]^nonce;
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 2{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }
xors[ 0] = z[ 8]; xors[ 1] = z[ 0]; xors[ 2] = z[ 2]; xors[ 3] = c_x[i++];
xors[ 4] = z[11]; xors[ 5] = c_x[i++]; xors[ 6] = c_x[i++]; xors[ 7] = c_x[i++];
xors[ 8] = z[14]; xors[ 9] = nonce^z[ 6]; xors[10] = z[ 1]; xors[11] = z[ 4];
xors[12] = z[10]; xors[13] = z[ 3]; xors[14] = c_x[i++]; xors[15] = c_x[i++];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
// 3{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }
xors[ 0] = z[ 9]; xors[ 1] = nonce^z[ 1]; xors[ 2] = c_x[i++]; xors[ 3] = z[14];
xors[ 4] = z[ 7]; xors[ 5] = c_x[i++]; xors[ 6] = z[13]; xors[ 7] = z[11];
xors[ 8] = c_x[i++]; xors[ 9] = z[10]; xors[10] = c_x[i++]; xors[11] = c_x[i++];
xors[12] = z[ 2]; xors[13] = z[ 5]; xors[14] = c_x[i++]; xors[15] = z[15];
GSn4(0, 4, 8,12, xors[ 0],xors[ 4], 1, 5, 9,13, xors[ 1],xors[ 5], 2, 6,10,14, xors[ 2],xors[ 6], 3, 7,11,15, xors[ 3],xors[ 7]);
GSn4(0, 5,10,15, xors[ 8],xors[12], 1, 6,11,12, xors[ 9],xors[13], 2, 7, 8,13, xors[10],xors[14], 3, 4, 9,14, xors[11],xors[15]);
Hash[0*threads + thread] = make_uint2(cuda_swab32(xor3x(c_h[ 0],v[ 0],v[ 8])), cuda_swab32(xor3x(c_h[ 1],v[ 1],v[ 9])));
Hash[1*threads + thread] = make_uint2(cuda_swab32(xor3x(c_h[ 2],v[ 2],v[10])), cuda_swab32(xor3x(c_h[ 3],v[ 3],v[11])));
Hash[2*threads + thread] = make_uint2(cuda_swab32(xor3x(c_h[ 4],v[ 4],v[12])), cuda_swab32(xor3x(c_h[ 5],v[ 5],v[13])));
Hash[3*threads + thread] = make_uint2(cuda_swab32(xor3x(c_h[ 6],v[ 6],v[14])), cuda_swab32(xor3x(c_h[ 7],v[ 7],v[15])));
}
}
__host__
void blake256_14round_cpu_hash_80(const uint32_t threads, const uint32_t startNonce, uint2* d_Hash){
	const dim3 grid((threads + TPB - 1)/TPB);
const dim3 block(TPB);
blake256_14round_gpu_hash_80 <<<grid, block>>> (threads, startNonce, d_Hash);
}
__host__
void blake256_14round_cpu_setBlock_80(const uint32_t *pdata){
uint32_t _ALIGN(64) h[8];
uint32_t _ALIGN(64) v[16];
uint32_t _ALIGN(64) data[20];
uint32_t _ALIGN(64) x[60];
memcpy(data, pdata, 80);
memcpy(h, c_IV256, sizeof(c_IV256));
blake256_14round_compress1st(h, pdata, 512);
cudaMemcpyToSymbol(c_h, h, 8*sizeof(uint32_t), 0);
const uint32_t m[16] = { pdata[16], pdata[17], pdata[18], 0,
0x80000000, 0, 0, 0,
0, 0, 0, 0,
0, 1, 0, 640
};
cudaMemcpyToSymbol(c_m, m, 16*sizeof(uint32_t), 0);
const uint32_t z[16] = {
0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344, 0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C, 0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917
};
v[ 0] = h[ 0]; v[ 1] = h[ 1]; v[ 2] = h[ 2]; v[ 3] = h[ 3];
v[ 4] = h[ 4]; v[ 5] = h[ 5]; v[ 6] = h[ 6]; v[ 7] = h[ 7];
v[ 8] = z[ 0]; v[ 9] = z[ 1]; v[10] = z[ 2]; v[11] = z[ 3];
v[12] = z[ 4] ^ 640; v[13] = z[ 5] ^ 640; v[14] = z[ 6]; v[15] = z[ 7];
hostGSn(0, 4, 8,12, 0, 1);
hostGSn(2, 6,10,14, 4, 5);
hostGSn(3, 7,11,15, 6, 7);
v[ 1]+= (m[ 2] ^ z[ 3]) + v[ 5];
v[13] = ROTR32(v[13] ^ v[ 1],16);
v[ 9] += v[13];
v[ 5] = ROTR32(v[ 5] ^ v[ 9],12);
v[ 1]+= v[ 5];
v[ 0]+= z[ 9];
v[ 2]+= z[13] + v[ 7];
v[ 3]+= z[15] + v[ 4];
v[14] = ROTL32(v[14] ^ v[ 3],16);
cudaMemcpyToSymbol(c_v, v, 16*sizeof(uint32_t), 0);
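	// Precompute the (message word XOR constant) terms that do not depend on the
	// nonce; the GPU kernel consumes them sequentially via c_x[i++] in the same
	// order (rounds 0-9). Terms involving the nonce (message word 3) are formed
	// on the device as z[..] ^ nonce, and message words that are zero reduce to
	// the plain constants z[..].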
int i=0;
x[i++] = m[ 0]^z[ 1]; x[i++] = m[ 2]^z[ 3]; x[i++] = m[ 4]^z[ 5]; x[i++] = z[ 0]^m[ 1]; x[i++] = z[12]^m[13]; x[i++] = z[14]^m[15];
//1
x[i++] = m[ 4]^z[ 8]; x[i++] = m[13]^z[ 6]; x[i++] = z[ 9]^m[15]; x[i++] = m[ 1]^z[12]; x[i++] = m[ 0]^z[ 2]; x[i++] = z[ 0]^m[ 2];
//2
x[i++] = m[15]^z[13]; x[i++] = z[12]^m[ 0]; x[i++] = z[ 5]^m[ 2]; x[i++] = z[15]^m[13]; x[i++] = z[ 7]^m[ 1]; x[i++] = z[ 9]^m[ 4];
//3
x[i++] = z[12]^m[13]; x[i++] = z[ 3]^m[ 1]; x[i++] = m[ 2]^z[ 6]; x[i++] = m[ 4]^z[ 0]; x[i++] = m[15]^z[ 8]; x[i++] = z[ 4]^m[ 0];
//4
x[i++] = m[ 2]^z[ 4]; x[i++] = z[ 9]^m[ 0]; x[i++] = z[ 2]^m[ 4]; x[i++] = z[10]^m[15]; x[i++] = z[14]^m[ 1]; x[i++] = z[ 3]^m[13];
//5
x[i++] = m[ 2]^z[12]; x[i++] = m[ 0]^z[11]; x[i++] = m[ 4]^z[13]; x[i++] = z[14]^m[15]; x[i++] = m[ 1]^z[ 9]; x[i++] = z[ 4]^m[13];
//6
x[i++] = m[ 1]^z[15]; x[i++] = m[ 4]^z[10]; x[i++] = z[ 1]^m[15]; x[i++] = z[14]^m[13]; x[i++] = m[ 0]^z[ 7]; x[i++] = z[ 9]^m[ 2];
//7
x[i++] = m[13]^z[11]; x[i++] = z[12]^m[ 1]; x[i++] = m[15]^z[ 4]; x[i++] = m[ 2]^z[10]; x[i++] = z[ 5]^m[ 0]; x[i++] = z[15]^m[ 4];
//8
x[i++] = m[ 0]^z[ 8]; x[i++] = z[ 6]^m[15]; x[i++] = m[13]^z[ 7]; x[i++] = m[ 1]^z[ 4]; x[i++] = z[12]^m[ 2]; x[i++] = z[ 1]^m[ 4];
//9
x[i++] = m[ 1]^z[ 5]; x[i++] = z[10]^m[ 2]; x[i++] = z[ 8]^m[ 4]; x[i++] = m[15]^z[11]; x[i++] = m[13]^z[ 0]; x[i++] = z[13]^m[ 0];
cudaMemcpyToSymbol(c_x, x, i*sizeof(uint32_t), 0);
}
|
the_stack
|
#include "octnet/gpu/oc2col.h"
#include "octnet/gpu/gpu.h"
#include "octnet/gpu/buffer.h"
#include "octnet/core/z_curve.h"
__device__
inline bool oc2col_in_vol(const octree* in, const int d, const int h, const int w) {
return d >= 0 && h >= 0 && w >= 0 && d < 8 * in->grid_depth && h < 8 * in->grid_height && w < 8 * in->grid_width;
}
__device__
inline void oc2col_leaf(const octree* in, const ot_tree_t* leaf_tree, const int leaf_grid_idx, const int leaf_bit_idx,
const int n, const int ds, const int hs, const int ws, const int size,
ot_data_t* out_shared, ot_data_t* out) {
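  // For every feature channel, fills a 3x3x3 (K333 = 27) im2col patch for this
  // leaf: the leaf's own value contributes the fraction of each shifted size^3
  // window that stays inside the leaf (normalized by factor = 1/size^3), and
  // the remaining fractions are gathered from the neighboring cells across the
  // corners, edges and faces of the leaf.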
const ot_data_t factor = 1.f / (size * size * size);
int d,h,w, kidx, grid_idx, bit_idx, data_idx, data_cnt, data_cnt_e1, data_cnt_e2;
ot_tree_t* tree;
ot_data_t* data_in;
ot_data_t val_in, val;
for(int f = 0; f < in->feature_size; ++f) {
//leaf data
data_idx = tree_data_idx(leaf_tree, leaf_bit_idx, in->feature_size);
// data_in = in->data_ptrs[leaf_grid_idx] + data_idx;
data_in = octree_get_data(in, leaf_grid_idx) + data_idx;
val_in = data_in[f];
//1-center
val = size*size*size * factor * val_in;
// (1,1,1)=13
out_shared[13] = val;
//6
val = (size-1)*size*size * factor * val_in;
//(0,1,1)=4, (2,1,1)=22, (1,0,1)=10, (1,2,1)=16, (1,1,0)=12, (1,1,2)=14
out_shared[ 4] = val;
out_shared[10] = val;
out_shared[12] = val;
out_shared[14] = val;
out_shared[16] = val;
out_shared[22] = val;
//8
val = (size-1)*(size-1)*(size-1) * factor * val_in;
//(0,0,0)=0, (0,0,2)=2, (0,2,0)=6, (0,2,2)=8,
//(2,0,0)=18, (2,0,2)=20, (2,2,0)=24, (2,2,2)=26
out_shared[ 0] = val;
out_shared[ 2] = val;
out_shared[ 6] = val;
out_shared[ 8] = val;
out_shared[18] = val;
out_shared[20] = val;
out_shared[24] = val;
out_shared[26] = val;
//12
val = (size-1)*(size-1)*(size) * factor * val_in;
//(0,0,1)=1, (0,1,0)=3, (0,1,2)=5, (0,2,1)=7
//(1,0,0)=9, (1,0,2)=11, (1,2,0)=15, (1,2,2)=17
//(2,0,1)=19, (2,1,0)=21, (2,1,2)=23, (2,2,1)=25
out_shared[ 1] = val;
out_shared[ 3] = val;
out_shared[ 5] = val;
out_shared[ 7] = val;
out_shared[ 9] = val;
out_shared[11] = val;
out_shared[15] = val;
out_shared[17] = val;
out_shared[19] = val;
out_shared[21] = val;
out_shared[23] = val;
out_shared[25] = val;
//corner data
for(int cd = 0; cd < 2; ++cd) {
for(int ch = 0; ch < 2; ++ch) {
for(int cw = 0; cw < 2; ++cw) {
d = ds + (cd*(size+1)-1); h = hs + (ch*(size+1)-1); w = ws + (cw*(size+1)-1);
if(oc2col_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
data_in = octree_get_data(in, grid_idx);
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, in->feature_size);
kidx = (cd*2*3 + ch*2)*3 + cw*2;
out_shared[kidx] += factor * data_in[data_idx + f];
}
}
}
}
//along the edges
//d
for(int ch = 0; ch < 2; ++ch) {
for(int cw = 0; cw < 2; ++cw) {
d = ds; h = hs + (ch*(size+1)-1); w = ws + (cw*(size+1)-1);
if(oc2col_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
data_in = octree_get_data(in, grid_idx);
int e = 0;
while(e < size) {
d = ds + e;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
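          // Extent (in voxels per axis) of the leaf that owns this bit index:
          // the root leaf covers 8, level-1 leaves 4 (bits 1-8), level-2
          // leaves 2 (bits 9-72), level-3 leaves 1 (bits >= 73); clamped below
          // to the remaining edge length.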
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt = IMIN(size - e, data_cnt);
data_idx = tree_data_idx(tree, bit_idx, in->feature_size);
val = factor * data_in[data_idx + f];
kidx = ((0) * 3 + (ch*2)) * 3 + (cw*2);
out_shared[kidx] += (data_cnt - (e+data_cnt >= size)) * val;
kidx = ((1) * 3 + (ch*2)) * 3 + (cw*2);
out_shared[kidx] += data_cnt * val;
kidx = ((2) * 3 + (ch*2)) * 3 + (cw*2);
out_shared[kidx] += (data_cnt - (e == 0)) * val;
e += data_cnt;
}
}
}
}
//h
for(int cd = 0; cd < 2; ++cd) {
for(int cw = 0; cw < 2; ++cw) {
d = ds + (cd*(size+1)-1); h = hs; w = ws + (cw*(size+1)-1);
if(oc2col_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
data_in = octree_get_data(in, grid_idx);
int e = 0;
while(e < size) {
h = hs + e;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt = IMIN(size - e, data_cnt);
data_idx = tree_data_idx(tree, bit_idx, in->feature_size);
val = factor * data_in[data_idx + f];
kidx = ((cd*2) * 3 + (0)) * 3 + (cw*2);
out_shared[kidx] += (data_cnt - (e+data_cnt >= size)) * val;
kidx = ((cd*2) * 3 + (1)) * 3 + (cw*2);
out_shared[kidx] += data_cnt * val;
kidx = ((cd*2) * 3 + (2)) * 3 + (cw*2);
out_shared[kidx] += (data_cnt - (e == 0)) * val;
e += data_cnt;
}
}
}
}
//w
for(int cd = 0; cd < 2; ++cd) {
for(int ch = 0; ch < 2; ++ch) {
d = ds + (cd*(size+1)-1); h = hs + (ch*(size+1)-1); w = ws;
if(oc2col_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
data_in = octree_get_data(in, grid_idx);
int e = 0;
while(e < size) {
w = ws + e;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt = IMIN(size - e, data_cnt);
data_idx = tree_data_idx(tree, bit_idx, in->feature_size);
val = factor * data_in[data_idx + f];
kidx = ((cd*2) * 3 + (ch*2)) * 3 + (0);
out_shared[kidx] += (data_cnt - (e+data_cnt >= size)) * val;
kidx = ((cd*2) * 3 + (ch*2)) * 3 + (1);
out_shared[kidx] += data_cnt * val;
kidx = ((cd*2) * 3 + (ch*2)) * 3 + (2);
out_shared[kidx] += (data_cnt - (e == 0)) * val;
e += data_cnt;
}
}
}
}
//along the faces
//d
for(int fd = 0; fd < 2; ++fd) {
d = ds + (fd*(size+1)-1); h = hs; w = ws;
if(oc2col_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
data_in = octree_get_data(in, grid_idx);
int z = 0;
while(z < size * size) {
const int e1 = z_curve_x(z);
const int e2 = z_curve_y(z);
h = hs + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt_e1 = IMIN(size - e1, data_cnt);
data_cnt_e2 = IMIN(size - e2, data_cnt);
data_cnt = IMIN(size * size - z, data_cnt * data_cnt);
data_idx = tree_data_idx(tree, bit_idx, in->feature_size);
val = factor * data_in[data_idx + f];
kidx = ((fd*2) * 3 + (0)) * 3 + (0);
out_shared[kidx] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * val;
kidx = ((fd*2) * 3 + (0)) * 3 + (1);
out_shared[kidx] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2) * val;
kidx = ((fd*2) * 3 + (0)) * 3 + (2);
out_shared[kidx] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2 == 0)) * val;
kidx = ((fd*2) * 3 + (1)) * 3 + (0);
out_shared[kidx] += (data_cnt_e1) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * val;
kidx = ((fd*2) * 3 + (1)) * 3 + (1);
out_shared[kidx] += data_cnt * val;
kidx = ((fd*2) * 3 + (1)) * 3 + (2);
out_shared[kidx] += (data_cnt_e1) * (data_cnt_e2 - (e2 == 0)) * val;
kidx = ((fd*2) * 3 + (2)) * 3 + (0);
out_shared[kidx] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * val;
kidx = ((fd*2) * 3 + (2)) * 3 + (1);
out_shared[kidx] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2) * val;
kidx = ((fd*2) * 3 + (2)) * 3 + (2);
out_shared[kidx] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2 == 0)) * val;
z += data_cnt;
}
}
}
//h
for(int fh = 0; fh < 2; ++fh) {
d = ds; h = hs + (fh*(size+1)-1); w = ws;
if(oc2col_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
data_in = octree_get_data(in, grid_idx);
int z = 0;
while(z < size * size) {
const int e1 = z_curve_x(z);
const int e2 = z_curve_y(z);
d = ds + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt_e1 = IMIN(size - e1, data_cnt);
data_cnt_e2 = IMIN(size - e2, data_cnt);
data_cnt = IMIN(size * size - z, data_cnt * data_cnt);
data_idx = tree_data_idx(tree, bit_idx, in->feature_size);
val = factor * data_in[data_idx + f];
kidx = ((0) * 3 + (fh*2)) * 3 + (0);
out_shared[kidx] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * val;
kidx = ((0) * 3 + (fh*2)) * 3 + (1);
out_shared[kidx] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2) * val;
kidx = ((0) * 3 + (fh*2)) * 3 + (2);
out_shared[kidx] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2 == 0)) * val;
kidx = ((1) * 3 + (fh*2)) * 3 + (0);
out_shared[kidx] += (data_cnt_e1) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * val;
kidx = ((1) * 3 + (fh*2)) * 3 + (1);
out_shared[kidx] += data_cnt * val;
kidx = ((1) * 3 + (fh*2)) * 3 + (2);
out_shared[kidx] += (data_cnt_e1) * (data_cnt_e2 - (e2 == 0)) * val;
kidx = ((2) * 3 + (fh*2)) * 3 + (0);
out_shared[kidx] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * val;
kidx = ((2) * 3 + (fh*2)) * 3 + (1);
out_shared[kidx] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2) * val;
kidx = ((2) * 3 + (fh*2)) * 3 + (2);
out_shared[kidx] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2 == 0)) * val;
z += data_cnt;
}
}
}
//w
for(int fw = 0; fw < 2; ++fw) {
d = ds; h = hs; w = ws + (fw*(size+1)-1);
if(oc2col_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
data_in = octree_get_data(in, grid_idx);
int z = 0;
while(z < size * size) {
const int e1 = z_curve_x(z);
const int e2 = z_curve_y(z);
d = ds + e1;
h = hs + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt_e1 = IMIN(size - e1, data_cnt);
data_cnt_e2 = IMIN(size - e2, data_cnt);
data_cnt = IMIN(size * size - z, data_cnt * data_cnt);
data_idx = tree_data_idx(tree, bit_idx, in->feature_size);
val = factor * data_in[data_idx + f];
kidx = ((0) * 3 + (0)) * 3 + (fw*2);
out_shared[kidx] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * val;
kidx = ((0) * 3 + (1)) * 3 + (fw*2);
out_shared[kidx] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2) * val;
kidx = ((0) * 3 + (2)) * 3 + (fw*2);
out_shared[kidx] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2 == 0)) * val;
kidx = ((1) * 3 + (0)) * 3 + (fw*2);
out_shared[kidx] += (data_cnt_e1) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * val;
kidx = ((1) * 3 + (1)) * 3 + (fw*2);
out_shared[kidx] += data_cnt * val;
kidx = ((1) * 3 + (2)) * 3 + (fw*2);
out_shared[kidx] += (data_cnt_e1) * (data_cnt_e2 - (e2 == 0)) * val;
kidx = ((2) * 3 + (0)) * 3 + (fw*2);
out_shared[kidx] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * val;
kidx = ((2) * 3 + (1)) * 3 + (fw*2);
out_shared[kidx] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2) * val;
kidx = ((2) * 3 + (2)) * 3 + (fw*2);
out_shared[kidx] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2 == 0)) * val;
z += data_cnt;
}
}
}
//copy shared to global
for(kidx = 0; kidx < K333; ++kidx) {
out[f * K333 + kidx] = out_shared[kidx];
}
}
}
__global__ void kernel_oc2col_leafs(ot_data_t* col_buffer, const octree in, int leafs_offset, int n_leafs) {
extern __shared__ ot_data_t out_shared[];
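  // Each thread works on a private K333-sized slice of the dynamically sized
  // shared memory; the launch below reserves 256 * K333 elements per block to
  // match the 256-thread block size.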
const int out_inc = K333 * in.feature_size;
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
leaf_idx = leaf_idx + leafs_offset;
// const int grid_idx = col_buffer[leaf_idx * out_inc];
const int grid_idx = leaf_idx_to_grid_idx(&in, leaf_idx);
const ot_tree_t* tree = octree_get_tree(&in, grid_idx);
// const int cum_n_leafs = n_leafs_upto(&in, grid_idx);
const int cum_n_leafs = in.prefix_leafs[grid_idx];
const int data_idx = leaf_idx - cum_n_leafs;
const int bit_idx = data_idx_to_bit_idx(tree, data_idx);
int n,d,h,w;
const int depth = octree_ind_to_dense_ind(&in, grid_idx, bit_idx, &n, &d,&h,&w);
const int size = width_from_depth(depth);
const int col_buffer_idx = (cum_n_leafs - leafs_offset) * out_inc + tree_data_idx(tree, bit_idx, out_inc);
oc2col_leaf(&in, tree, grid_idx, bit_idx, n,d,h,w,size, out_shared + threadIdx.x * K333, col_buffer + col_buffer_idx);
}
}
void oc2col_gpu(const octree* in, ot_data_t* col_buffer, ot_size_t col_buffer_capacity, int leafs_offset, int n_leafs) {
if(DEBUG) { printf("[DEBUG] oc2col_gpu n_blocks=%d, n_leafs %d\n", octree_num_blocks(in), in->n_leafs); }
const int n_blocks = octree_num_blocks(in);
// octree_leaf_idx_to_grid_idx_gpu(in, K333 * in->feature_size, col_buffer_capacity, col_buffer);
kernel_oc2col_leafs<<<GET_BLOCKS_T(n_leafs, 256), 256, 256 * K333 * sizeof(ot_data_t)>>>(col_buffer, *in, leafs_offset, n_leafs);
CUDA_POST_KERNEL_CHECK;
}
|
the_stack
|
#if ( defined GRAVITY && defined GPU && POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 )
#define POT_NXT_F ( PATCH_SIZE+2*POT_GHOST_SIZE )
#define POT_NTHREAD ( RHO_NXT*RHO_NXT*POT_BLOCK_SIZE_Z/2 )
#define POT_USELESS ( POT_GHOST_SIZE%2 )
#define POT_USELESS2 ( POT_USELESS^(GRA_GHOST_SIZE&1) )
#define POT_NXT_INT ( (POT_NXT-2)*2 )
#define ip ( PotCen + Disp15 )
#define im ( PotCen + Disp14 )
#define jp ( PotCen + POT_NXT_F/2 )
#define jm ( PotCen - POT_NXT_F/2 )
#define kp ( PotCen + dz )
#define km ( PotCen - dz )
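// ip/im, jp/jm and kp/km address the six nearest neighbors of the central cell
// PotCen in the x-interleaved (half-width) shared potential array s_FPot;
// Disp14/Disp15 (computed later in the kernel according to the cell parity)
// supply the +/-x offsets.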
// an additional __syncthreads() call is required for POT_GHOST_SIZE == 4 or in device-emulation mode
#if ( POT_GHOST_SIZE == 4 || defined __DEVICE_EMULATION__ )
#define SYNCTHREADS() __syncthreads()
#else
#define SYNCTHREADS()
#endif
// variables reside in constant memory
#include "CUDA_ConstMemory.h"
//-------------------------------------------------------------------------------------------------------
// Function : CUPOT_PoissonSolver_SOR_16to18cube
// Description : GPU Poisson solver using the SOR scheme
//
// Note : a. Works for POT_GHOST_SIZE = 4, 5 <--> POT_NXT_F = 16, 18
// b. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// c. We do not use automatic arrays, to avoid spilling into high-latency local memory
// --> unroll loops manually ...
// d. Reference : Numerical Recipes, Chapter 20.5
//
// Parameter : g_Rho_Array : Global memory array to store the input density
// g_Pot_Array_In : Global memory array storing the input "coarse-grid" potential for
// interpolation
// g_Pot_Array_Out : Global memory array to store the output potential
// Min_Iter : Minimum # of iterations for SOR
// Max_Iter : Maximum # of iterations for SOR
// Omega_6 : Omega / 6
// Const : (Coefficient in front of the RHS in the Poisson eq.) / dh^2
// IntScheme : Interpolation scheme for potential
// --> currently supported schemes include
// INT_CQUAD : conservative quadratic interpolation
// INT_QUAD : quadratic interpolation
//---------------------------------------------------------------------------------------------------
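// For reference, given the parameter descriptions above, the per-cell update
// presumably takes the standard SOR form (a sketch based on Omega_6 and Const,
// not a verbatim copy of the iteration code below):
//   Residual    = phi[ip] + phi[im] + phi[jp] + phi[jm] + phi[kp] + phi[km]
//                 - 6*phi[PotCen] - Const*rho
//   phi[PotCen] += Omega_6 * Residual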
__global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme )
{
const uint bx = blockIdx.x;
const uint tx = threadIdx.x;
const uint ty = threadIdx.y;
const uint ID0 = ty*blockDim.x + tx; // the 1-D index of thread
const uint Disp1 = ty&1; // ty = (odd,even) <--> Disp1 = ( 1, 0)
const uint Disp2 = Disp1^1; // ty = (odd,even) <--> Disp2 = ( 0, 1)
const int Disp3 = -1 + (int)(Disp2<<1); // ty = (odd,even) <--> Disp3 = (-1,+1)
const int Disp7 = -Disp1; // ty = (odd,even) <--> Disp7 = (-1, 0)
const int Disp12 = -Disp3; // ty = (odd,even) <--> Disp12 = (+1,-1)
   const int Disp13 = -Disp2; // ty = (odd,even) <--> Disp13 = ( 0,-1)
const uint dz = POT_NXT_F*POT_NXT_F/2;
const uint RhoCen0 = ty*RHO_NXT + (tx<<1); // the index of rho
const uint PotCen0 = dz + __umul24(1+ty, POT_NXT_F/2) + tx; // the index of the left potential
const uint FloorPow2 = 1<<(31-__clz(POT_NTHREAD) ); // FloorPow2: largest power-of-two value not
const uint Remain = POT_NTHREAD - FloorPow2; // greater than POT_NTHREAD
real BPot_xy1, BPot_xy2, BPot_yz1, BPot_yz2, BPot_xz1, BPot_xz2; // boundary potential stored in registers
real RPot0, RPot1, RPot2, RPot3, RPot4, RPot5, RPot6, RPot7, RPot8; // internal potential stored in registers
real RPot9, RPot10, RPot11, RPot12, RPot13; // internal potential stored in registers
# if ( POT_GHOST_SIZE == 5 )
real RPot14, RPot15; // internal potential stored in registers
# endif
real Residual, Residual_Total_Old, Temp, Temp1, Temp2;
uint ID1, ID2, ID3, RhoCen, PotCen, SendID, RecvID, Disp4, Disp5;
int Disp6, Disp8, Disp9, Disp10, Disp11, Disp14, Disp15, Disp16;
int ii, jj, Idx, Idy;
__shared__ real s_FPot [POT_NXT_F*POT_NXT_F*POT_NXT_F/2];
__shared__ real s_CPot1 [POT_NXT *POT_NXT ];
__shared__ real s_CPot2 [POT_NXT *POT_NXT ];
__shared__ real s_CPot3 [POT_NXT *POT_NXT ];
__shared__ real s_IntPot[POT_NXT_INT*POT_NXT_INT];
__shared__ real s_Residual_Total[POT_NTHREAD];
real *s_CPot_z1, *s_CPot_z2, *s_CPot_z3, *s_Temp;
// a. evaluate the "fine-grid" potential by interpolation (as the initial guess and the B.C.)
// ---------------------------------------------------------------------------------------------------------
const real Const_8 = 1.0/8.0;
const real Const_64 = 1.0/64.0;
const real Const_512 = 1.0/512.0;
const int Cdx = 1;
const int Cdy = POT_NXT;
const int CIDx = 1 + ID0 % ( POT_NXT-2 );
const int CIDy = 1 + ID0 / ( POT_NXT-2 );
const int CID = __mul24( CIDy, Cdy ) + __mul24( CIDx, Cdx );
const int Fdx = 1;
const int Fdy = POT_NXT_INT;
const int FIDx = (CIDx-1)*2;
const int FIDy = (CIDy-1)*2;
const int FID = __mul24( FIDy, Fdy ) + __mul24( FIDx, Fdx );
real Slope_00, Slope_01, Slope_02, Slope_03, Slope_04, Slope_05, Slope_06, Slope_07;
real Slope_08, Slope_09, Slope_10, Slope_11, Slope_12;
// first we load three slices of the coarse-grid potential into the shared memory
s_CPot_z1 = s_CPot1;
s_CPot_z2 = s_CPot2;
s_CPot_z3 = s_CPot3;
for (uint t=ID0; t<POT_NXT*POT_NXT; t+=POT_NTHREAD)
{
ID1 = t + 0*POT_NXT*POT_NXT;
s_CPot_z1[t] = g_Pot_Array_In[bx][ID1];
}
__syncthreads();
for (uint t=ID0; t<POT_NXT*POT_NXT; t+=POT_NTHREAD)
{
ID1 = t + 1*POT_NXT*POT_NXT;
s_CPot_z2[t] = g_Pot_Array_In[bx][ID1];
}
__syncthreads();
for (uint t=ID0; t<POT_NXT*POT_NXT; t+=POT_NTHREAD)
{
ID1 = t + 2*POT_NXT*POT_NXT;
s_CPot_z3[t] = g_Pot_Array_In[bx][ID1];
}
__syncthreads();
// (a1). interpolation : the lowest z plane
// ===========================================================================
if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
{
switch ( IntScheme )
{
/*
case INT_CENTRAL :
{
Slope_00 = (real)0.125 * ( s_CPot_z2[CID+Cdx] - s_CPot_z2[CID-Cdx] );
Slope_01 = (real)0.125 * ( s_CPot_z2[CID+Cdy] - s_CPot_z2[CID-Cdy] );
Slope_02 = (real)0.125 * ( s_CPot_z3[CID ] - s_CPot_z1[CID ] );
#if ( POT_GHOST_SIZE == 4 ) // lower plane
s_IntPot[FID ] = s_CPot_z2[CID] - Slope_00 - Slope_01 - Slope_02;
s_IntPot[FID+Fdx ] = s_CPot_z2[CID] + Slope_00 - Slope_01 - Slope_02;
s_IntPot[FID +Fdy] = s_CPot_z2[CID] - Slope_00 + Slope_01 - Slope_02;
s_IntPot[FID+Fdx+Fdy] = s_CPot_z2[CID] + Slope_00 + Slope_01 - Slope_02;
#else // upper plane
s_IntPot[FID ] = s_CPot_z2[CID] - Slope_00 - Slope_01 + Slope_02;
s_IntPot[FID+Fdx ] = s_CPot_z2[CID] + Slope_00 - Slope_01 + Slope_02;
s_IntPot[FID +Fdy] = s_CPot_z2[CID] - Slope_00 + Slope_01 + Slope_02;
s_IntPot[FID+Fdx+Fdy] = s_CPot_z2[CID] + Slope_00 + Slope_01 + Slope_02;
#endif
}
break; // INT_CENTRAL
*/
case INT_CQUAD :
{
Slope_00 = Const_8 * ( s_CPot_z2[CID+Cdx ] - s_CPot_z2[CID-Cdx ] );
Slope_01 = Const_8 * ( s_CPot_z2[CID+Cdy ] - s_CPot_z2[CID-Cdy ] );
Slope_02 = Const_8 * ( s_CPot_z3[CID ] - s_CPot_z1[CID ] );
Slope_03 = Const_64 * ( s_CPot_z1[CID+Cdx ] - s_CPot_z1[CID-Cdx ] );
Slope_04 = Const_64 * ( s_CPot_z1[CID +Cdy] - s_CPot_z1[CID -Cdy] );
Slope_05 = Const_64 * ( s_CPot_z2[CID+Cdx-Cdy] - s_CPot_z2[CID-Cdx-Cdy] );
Slope_06 = Const_64 * ( s_CPot_z2[CID+Cdx+Cdy] - s_CPot_z2[CID-Cdx+Cdy] );
Slope_07 = Const_64 * ( s_CPot_z3[CID+Cdx ] - s_CPot_z3[CID-Cdx ] );
Slope_08 = Const_64 * ( s_CPot_z3[CID +Cdy] - s_CPot_z3[CID -Cdy] );
Slope_09 = Const_512 * ( s_CPot_z1[CID+Cdx-Cdy] - s_CPot_z1[CID-Cdx-Cdy] );
Slope_10 = Const_512 * ( s_CPot_z1[CID+Cdx+Cdy] - s_CPot_z1[CID-Cdx+Cdy] );
Slope_11 = Const_512 * ( s_CPot_z3[CID+Cdx-Cdy] - s_CPot_z3[CID-Cdx-Cdy] );
Slope_12 = Const_512 * ( s_CPot_z3[CID+Cdx+Cdy] - s_CPot_z3[CID-Cdx+Cdy] );
#if ( POT_GHOST_SIZE == 4 ) // lower plane
s_IntPot[FID ] = - Slope_00 - Slope_01 - Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx ] = + Slope_00 - Slope_01 - Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID +Fdy] = - Slope_00 + Slope_01 - Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx+Fdy] = + Slope_00 + Slope_01 - Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
#else // upper plane
s_IntPot[FID ] = - Slope_00 - Slope_01 + Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx ] = + Slope_00 - Slope_01 + Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID +Fdy] = - Slope_00 + Slope_01 + Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx+Fdy] = + Slope_00 + Slope_01 + Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
#endif
}
break; // INT_CQUAD
case INT_QUAD :
{
s_IntPot[FID ] = (real)0.0;
s_IntPot[FID+Fdx ] = (real)0.0;
s_IntPot[FID +Fdy] = (real)0.0;
s_IntPot[FID+Fdx+Fdy] = (real)0.0;
for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy );
for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx );
#if ( POT_GHOST_SIZE == 4 ) // lower plane
s_IntPot[FID ] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID +Fdy] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mp[Idy] * c_Mp[Idx];
#else // upper plane
s_IntPot[FID ] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID +Fdy] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mp[Idy] * c_Mp[Idx];
#endif
}} // for di,dj
}
break; // INT_QUAD
} // switch ( IntScheme )
} // if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
__syncthreads();
// store data into shared memory
ID1 = __umul24( 1+POT_USELESS+ty, POT_NXT_INT ) + 1+POT_USELESS+(tx<<1)+Disp1;
ID2 = __umul24( 1 +ty, POT_NXT_F/2 ) + tx +Disp1;
s_FPot[ID2] = s_IntPot[ID1];
// store data into registers
ID1 = __umul24( 1+POT_USELESS+ty, POT_NXT_INT ) + 1+POT_USELESS+(tx<<1)+Disp2;
BPot_xy1 = s_IntPot[ID1];
__syncthreads();
// for POT_USELESS == 0, no z plane is useless --> one more z plane (upper plane) to store
#if ( POT_GHOST_SIZE == 4 )
if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
{
switch ( IntScheme )
{
/*
case INT_CENTRAL :
{
s_IntPot[FID ] = s_CPot_z2[CID] - Slope_00 - Slope_01 + Slope_02;
s_IntPot[FID+Fdx ] = s_CPot_z2[CID] + Slope_00 - Slope_01 + Slope_02;
s_IntPot[FID +Fdy] = s_CPot_z2[CID] - Slope_00 + Slope_01 + Slope_02;
s_IntPot[FID+Fdx+Fdy] = s_CPot_z2[CID] + Slope_00 + Slope_01 + Slope_02;
}
break; // INT_CENTRAL
*/
case INT_CQUAD :
{
s_IntPot[FID ] = - Slope_00 - Slope_01 + Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx ] = + Slope_00 - Slope_01 + Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID +Fdy] = - Slope_00 + Slope_01 + Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx+Fdy] = + Slope_00 + Slope_01 + Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
}
break; // INT_CQUAD
case INT_QUAD :
{
s_IntPot[FID ] = (real)0.0;
s_IntPot[FID+Fdx ] = (real)0.0;
s_IntPot[FID +Fdy] = (real)0.0;
s_IntPot[FID+Fdx+Fdy] = (real)0.0;
for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy );
for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx );
s_IntPot[FID ] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID +Fdy] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mp[Idy] * c_Mp[Idx];
}} // for di,dj
}
break; // INT_QUAD
} // switch ( IntScheme )
} // if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
__syncthreads();
// store the internal potential into shared memory
ID1 = __umul24( 1+ty, POT_NXT_INT ) + 1+(tx<<1)+Disp2;
ID2 = 1*POT_NXT_F*POT_NXT_F/2 + __umul24( 1+ty, POT_NXT_F/2) + tx+Disp2;
s_FPot[ID2] = s_IntPot[ID1];
// store the internal potential into registers
ID1 = __umul24( 1+ty, POT_NXT_INT ) + 1+(tx<<1)+Disp1;
RPot0 = s_IntPot[ID1];
// store the boundary potential into shared memory or registers
if ( ID0 < RHO_NXT/2 )
{
ID3 = ID0;
// shared memory: -yz plane
ID1 = __umul24( 1+(ID3<<1), POT_NXT_INT );
ID2 = POT_NXT_F*POT_NXT_F/2 + __umul24( 1+(ID3<<1), POT_NXT_F/2 );
s_FPot[ID2] = s_IntPot[ID1];
// shared memory: +yz plane
ID1 = __umul24( 2+(ID3<<1), POT_NXT_INT ) + POT_NXT_INT-1;
ID2 = POT_NXT_F*POT_NXT_F/2 + __umul24( 2+(ID3<<1), POT_NXT_F/2 ) + POT_NXT_F/2-1;
s_FPot[ID2] = s_IntPot[ID1];
// shared memory: -xz plane
ID1 = 1+(ID3<<1);
ID2 = POT_NXT_F*POT_NXT_F/2 + ID3;
s_FPot[ID2] = s_IntPot[ID1];
// shared memory: +xz plane
ID1 = (POT_NXT_INT-1)*POT_NXT_INT + 2+(ID3<<1);
ID2 = POT_NXT_F*POT_NXT_F/2 + (POT_NXT_F-1)*POT_NXT_F/2 + ID3+1;
s_FPot[ID2] = s_IntPot[ID1];
// registers: -yz plane
ID1 = __umul24( 2+(ID3<<1), POT_NXT_INT );
BPot_yz1 = s_IntPot[ID1];
// registers: +yz plane
ID1 = __umul24( 1+(ID3<<1), POT_NXT_INT ) + POT_NXT_INT-1;
BPot_yz2 = s_IntPot[ID1];
// registers: -xz plane
ID1 = 2+(ID3<<1);
BPot_xz1 = s_IntPot[ID1];
// registers: +xz plane
ID1 = (POT_NXT_INT-1)*POT_NXT_INT + 1+(ID3<<1);
BPot_xz2 = s_IntPot[ID1];
} // if ( ID0 < RHO_NXT/2 )
__syncthreads();
#endif // ( POT_GHOST_SIZE == 4 )
// (a2). interpolation : central z planes
// ===========================================================================
for (uint Cz=0; Cz<=POT_NXT-5; Cz++)
{
s_Temp = s_CPot_z1;
s_CPot_z1 = s_CPot_z2;
s_CPot_z2 = s_CPot_z3;
s_CPot_z3 = s_Temp;
// load one slice of the coarse-grid potential into the shared memory
for (uint t=ID0; t<POT_NXT*POT_NXT; t+=POT_NTHREAD)
{
ID1 = t + __umul24( Cz+3, POT_NXT*POT_NXT );
s_CPot_z3[t] = g_Pot_Array_In[bx][ID1];
}
__syncthreads();
// interpolation
if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
{
switch ( IntScheme )
{
/*
case INT_CENTRAL :
{
Slope_00 = (real)0.125 * ( s_CPot_z2[CID+Cdx] - s_CPot_z2[CID-Cdx] );
Slope_01 = (real)0.125 * ( s_CPot_z2[CID+Cdy] - s_CPot_z2[CID-Cdy] );
Slope_02 = (real)0.125 * ( s_CPot_z3[CID ] - s_CPot_z1[CID ] );
}
break; // INT_CENTRAL
*/
case INT_CQUAD :
{
Slope_00 = Const_8 * ( s_CPot_z2[CID+Cdx ] - s_CPot_z2[CID-Cdx ] );
Slope_01 = Const_8 * ( s_CPot_z2[CID+Cdy ] - s_CPot_z2[CID-Cdy ] );
Slope_02 = Const_8 * ( s_CPot_z3[CID ] - s_CPot_z1[CID ] );
Slope_03 = Const_64 * ( s_CPot_z1[CID+Cdx ] - s_CPot_z1[CID-Cdx ] );
Slope_04 = Const_64 * ( s_CPot_z1[CID +Cdy] - s_CPot_z1[CID -Cdy] );
Slope_05 = Const_64 * ( s_CPot_z2[CID+Cdx-Cdy] - s_CPot_z2[CID-Cdx-Cdy] );
Slope_06 = Const_64 * ( s_CPot_z2[CID+Cdx+Cdy] - s_CPot_z2[CID-Cdx+Cdy] );
Slope_07 = Const_64 * ( s_CPot_z3[CID+Cdx ] - s_CPot_z3[CID-Cdx ] );
Slope_08 = Const_64 * ( s_CPot_z3[CID +Cdy] - s_CPot_z3[CID -Cdy] );
Slope_09 = Const_512 * ( s_CPot_z1[CID+Cdx-Cdy] - s_CPot_z1[CID-Cdx-Cdy] );
Slope_10 = Const_512 * ( s_CPot_z1[CID+Cdx+Cdy] - s_CPot_z1[CID-Cdx+Cdy] );
Slope_11 = Const_512 * ( s_CPot_z3[CID+Cdx-Cdy] - s_CPot_z3[CID-Cdx-Cdy] );
Slope_12 = Const_512 * ( s_CPot_z3[CID+Cdx+Cdy] - s_CPot_z3[CID-Cdx+Cdy] );
}
break; // INT_CQUAD
} // switch ( IntScheme )
} // if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
#if ( POT_GHOST_SIZE == 4 )
Disp16 = Disp2;
#else
Disp16 = Disp1;
#endif
// since the amount of shared memory is exhausted, we can only save one z plane at a time
for (int UpDown=0; UpDown<2; UpDown++)
{
const real Sign = (real)-1.0 + (real)2.0*(real)UpDown;
if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
{
switch ( IntScheme )
{
/*
case INT_CENTRAL :
{
s_IntPot[FID ] = s_CPot_z2[CID] - Slope_00 - Slope_01 + Sign*Slope_02;
s_IntPot[FID+Fdx ] = s_CPot_z2[CID] + Slope_00 - Slope_01 + Sign*Slope_02;
s_IntPot[FID +Fdy] = s_CPot_z2[CID] - Slope_00 + Slope_01 + Sign*Slope_02;
s_IntPot[FID+Fdx+Fdy] = s_CPot_z2[CID] + Slope_00 + Slope_01 + Sign*Slope_02;
}
break; // INT_CENTRAL
*/
case INT_CQUAD :
{
s_IntPot[FID ] = - Slope_00 - Slope_01 - Slope_05 + Slope_06
- Sign*( - Slope_02 - Slope_03 - Slope_04 + Slope_07 + Slope_08
- Slope_09 + Slope_10 + Slope_11 - Slope_12 )
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx ] = + Slope_00 - Slope_01 + Slope_05 - Slope_06
- Sign*( - Slope_02 + Slope_03 - Slope_04 - Slope_07 + Slope_08
+ Slope_09 - Slope_10 - Slope_11 + Slope_12 )
+ s_CPot_z2[CID];
s_IntPot[FID +Fdy] = - Slope_00 + Slope_01 + Slope_05 - Slope_06
- Sign*( - Slope_02 - Slope_03 + Slope_04 + Slope_07 - Slope_08
+ Slope_09 - Slope_10 - Slope_11 + Slope_12 )
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx+Fdy] = + Slope_00 + Slope_01 - Slope_05 + Slope_06
- Sign*( - Slope_02 + Slope_03 + Slope_04 - Slope_07 - Slope_08
- Slope_09 + Slope_10 + Slope_11 - Slope_12 )
+ s_CPot_z2[CID];
}
break; // INT_CQUAD
case INT_QUAD :
{
s_IntPot[FID ] = (real)0.0;
s_IntPot[FID+Fdx ] = (real)0.0;
s_IntPot[FID +Fdy] = (real)0.0;
s_IntPot[FID+Fdx+Fdy] = (real)0.0;
if ( UpDown == 0 )
{
for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy );
for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx );
s_IntPot[FID ] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID +Fdy] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mp[Idy] * c_Mp[Idx];
}} // for di,dj
} // if ( UpDown == 0 )
else
{
for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy );
for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx );
s_IntPot[FID ] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID +Fdy] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mp[Idy] * c_Mp[Idx];
}} // for di,dj
} // if ( UpDown == 0 ) ... else ...
}
break; // INT_QUAD
} // switch ( IntScheme )
} //if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
__syncthreads();
// store the internal potential into shared memory
ID1 = __umul24( 1+POT_USELESS+ty, POT_NXT_INT ) + 1+POT_USELESS+(tx<<1)+(Disp16^1);
ID2 = __umul24( 2-POT_USELESS+UpDown+Cz*2, POT_NXT_F*POT_NXT_F/2 )
+ __umul24( 1+ty, POT_NXT_F/2) + tx+(Disp16^1);
s_FPot[ID2] = s_IntPot[ID1];
// store the internal potential into registers
ID1 = __umul24( 1+POT_USELESS+ty, POT_NXT_INT ) + 1+POT_USELESS+(tx<<1)+Disp16;
#if ( POT_GHOST_SIZE == 4 )
if ( UpDown == 0 )
{
switch ( Cz )
{
case 0: RPot1 = s_IntPot[ID1]; break;
case 1: RPot3 = s_IntPot[ID1]; break;
case 2: RPot5 = s_IntPot[ID1]; break;
case 3: RPot7 = s_IntPot[ID1]; break;
case 4: RPot9 = s_IntPot[ID1]; break;
case 5: RPot11 = s_IntPot[ID1]; break;
}
}
else
{
switch ( Cz )
{
case 0: RPot2 = s_IntPot[ID1]; break;
case 1: RPot4 = s_IntPot[ID1]; break;
case 2: RPot6 = s_IntPot[ID1]; break;
case 3: RPot8 = s_IntPot[ID1]; break;
case 4: RPot10 = s_IntPot[ID1]; break;
case 5: RPot12 = s_IntPot[ID1]; break;
}
}
#else
if ( UpDown == 0 )
{
switch ( Cz )
{
case 0: RPot0 = s_IntPot[ID1]; break;
case 1: RPot2 = s_IntPot[ID1]; break;
case 2: RPot4 = s_IntPot[ID1]; break;
case 3: RPot6 = s_IntPot[ID1]; break;
case 4: RPot8 = s_IntPot[ID1]; break;
case 5: RPot10 = s_IntPot[ID1]; break;
case 6: RPot12 = s_IntPot[ID1]; break;
case 7: RPot14 = s_IntPot[ID1]; break;
}
}
else
{
switch ( Cz )
{
case 0: RPot1 = s_IntPot[ID1]; break;
case 1: RPot3 = s_IntPot[ID1]; break;
case 2: RPot5 = s_IntPot[ID1]; break;
case 3: RPot7 = s_IntPot[ID1]; break;
case 4: RPot9 = s_IntPot[ID1]; break;
case 5: RPot11 = s_IntPot[ID1]; break;
case 6: RPot13 = s_IntPot[ID1]; break;
case 7: RPot15 = s_IntPot[ID1]; break;
}
}
#endif // #if ( POT_GHOST_SIZE == 4 )
// store the boundary potential into shared memory or registers
if ( ID0 >= (2*Cz+UpDown+1-POT_USELESS)*RHO_NXT/2 && ID0 < (2*Cz+UpDown+2-POT_USELESS)*RHO_NXT/2 )
{
ID3 = ID0 % (RHO_NXT/2) ;
// shared memory: -yz plane
ID1 = __umul24( 2-UpDown+2*POT_USELESS*UpDown+(ID3<<1), POT_NXT_INT ) + POT_USELESS;
ID2 = __umul24( 2-POT_USELESS+UpDown+Cz*2, POT_NXT_F*POT_NXT_F/2 )
+ __umul24( 2-UpDown+2*POT_USELESS*UpDown-POT_USELESS+(ID3<<1), POT_NXT_F/2 );
s_FPot[ID2] = s_IntPot[ID1];
// shared memory: +yz plane
ID1 = __umul24( 1+UpDown-2*POT_USELESS*UpDown+2*POT_USELESS+(ID3<<1), POT_NXT_INT )
+ POT_NXT_INT-1-POT_USELESS;
ID2 = __umul24( 2-POT_USELESS+UpDown+Cz*2, POT_NXT_F*POT_NXT_F/2 )
+ __umul24( 1+UpDown-2*POT_USELESS*UpDown+POT_USELESS+(ID3<<1), POT_NXT_F/2 )
+ POT_NXT_F/2-1;
s_FPot[ID2] = s_IntPot[ID1];
// shared memory: -xz plane
ID1 = POT_USELESS*POT_NXT_INT + 2-UpDown+2*POT_USELESS*UpDown+(ID3<<1);
ID2 = __umul24( 2-POT_USELESS+UpDown+Cz*2, POT_NXT_F*POT_NXT_F/2 )
+ 1-UpDown-POT_USELESS+2*POT_USELESS*UpDown+ID3;
s_FPot[ID2] = s_IntPot[ID1];
// shared memory: +xz plane
ID1 = (POT_NXT_INT-1-POT_USELESS)*POT_NXT_INT
+ 1+UpDown-2*POT_USELESS*UpDown+2*POT_USELESS+(ID3<<1);
ID2 = __umul24( 2-POT_USELESS+UpDown+Cz*2, POT_NXT_F*POT_NXT_F/2 )
+ (POT_NXT_F-1)*POT_NXT_F/2 + UpDown-2*POT_USELESS*UpDown+POT_USELESS+ID3;
s_FPot[ID2] = s_IntPot[ID1];
// registers: -yz plane
ID1 = __umul24( 1+UpDown-2*POT_USELESS*UpDown+2*POT_USELESS+(ID3<<1), POT_NXT_INT )
+ POT_USELESS;
BPot_yz1 = s_IntPot[ID1];
// registers: +yz plane
ID1 = __umul24( 2-UpDown+2*POT_USELESS*UpDown+(ID3<<1), POT_NXT_INT )
+ POT_NXT_INT-1-POT_USELESS;
BPot_yz2 = s_IntPot[ID1];
// registers: -xz plane
ID1 = POT_USELESS*POT_NXT_INT + 1+UpDown-2*POT_USELESS*UpDown+2*POT_USELESS+(ID3<<1);
BPot_xz1 = s_IntPot[ID1];
// registers: +xz plane
ID1 = (POT_NXT_INT-1-POT_USELESS)*POT_NXT_INT + 2-UpDown+2*POT_USELESS*UpDown+(ID3<<1);
BPot_xz2 = s_IntPot[ID1];
} // if ( ID0 >= (2*Cz+UpDown+1-POT_USELESS)*RHO_NXT/2 && ID0 < (2*Cz+UpDown+2-POT_USELESS)*RHO_NXT/2 )
Disp16 = Disp16^1;
__syncthreads();
} // for (int UpDown=0; UpDown<2; UpDown++)
} // for (uint Cz=0; Cz<=POT_NXT-5; Cz++)
// (a3). interpolation : the highest z plane
// ===========================================================================
// load one slice of the coarse-grid potential into shared memory
s_Temp = s_CPot_z1;
s_CPot_z1 = s_CPot_z2;
s_CPot_z2 = s_CPot_z3;
s_CPot_z3 = s_Temp;
for (uint t=ID0; t<POT_NXT*POT_NXT; t+=POT_NTHREAD)
{
ID1 = t + (POT_NXT-1)*POT_NXT*POT_NXT;
s_CPot_z3[t] = g_Pot_Array_In[bx][ID1];
}
__syncthreads();
// interpolation
if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
{
switch ( IntScheme )
{
/*
case INT_CENTRAL :
{
Slope_00 = (real)0.125 * ( s_CPot_z2[CID+Cdx] - s_CPot_z2[CID-Cdx] );
Slope_01 = (real)0.125 * ( s_CPot_z2[CID+Cdy] - s_CPot_z2[CID-Cdy] );
Slope_02 = (real)0.125 * ( s_CPot_z3[CID ] - s_CPot_z1[CID ] );
#if ( POT_GHOST_SIZE == 4 ) // upper plane
s_IntPot[FID ] = s_CPot_z2[CID] - Slope_00 - Slope_01 + Slope_02;
s_IntPot[FID+Fdx ] = s_CPot_z2[CID] + Slope_00 - Slope_01 + Slope_02;
s_IntPot[FID +Fdy] = s_CPot_z2[CID] - Slope_00 + Slope_01 + Slope_02;
s_IntPot[FID+Fdx+Fdy] = s_CPot_z2[CID] + Slope_00 + Slope_01 + Slope_02;
#else // lower plane
s_IntPot[FID ] = s_CPot_z2[CID] - Slope_00 - Slope_01 - Slope_02;
s_IntPot[FID+Fdx ] = s_CPot_z2[CID] + Slope_00 - Slope_01 - Slope_02;
s_IntPot[FID +Fdy] = s_CPot_z2[CID] - Slope_00 + Slope_01 - Slope_02;
s_IntPot[FID+Fdx+Fdy] = s_CPot_z2[CID] + Slope_00 + Slope_01 - Slope_02;
#endif
}
break; // INT_CENTRAL
*/
case INT_CQUAD :
{
Slope_00 = Const_8 * ( s_CPot_z2[CID+Cdx ] - s_CPot_z2[CID-Cdx ] );
Slope_01 = Const_8 * ( s_CPot_z2[CID+Cdy ] - s_CPot_z2[CID-Cdy ] );
Slope_02 = Const_8 * ( s_CPot_z3[CID ] - s_CPot_z1[CID ] );
Slope_03 = Const_64 * ( s_CPot_z1[CID+Cdx ] - s_CPot_z1[CID-Cdx ] );
Slope_04 = Const_64 * ( s_CPot_z1[CID +Cdy] - s_CPot_z1[CID -Cdy] );
Slope_05 = Const_64 * ( s_CPot_z2[CID+Cdx-Cdy] - s_CPot_z2[CID-Cdx-Cdy] );
Slope_06 = Const_64 * ( s_CPot_z2[CID+Cdx+Cdy] - s_CPot_z2[CID-Cdx+Cdy] );
Slope_07 = Const_64 * ( s_CPot_z3[CID+Cdx ] - s_CPot_z3[CID-Cdx ] );
Slope_08 = Const_64 * ( s_CPot_z3[CID +Cdy] - s_CPot_z3[CID -Cdy] );
Slope_09 = Const_512 * ( s_CPot_z1[CID+Cdx-Cdy] - s_CPot_z1[CID-Cdx-Cdy] );
Slope_10 = Const_512 * ( s_CPot_z1[CID+Cdx+Cdy] - s_CPot_z1[CID-Cdx+Cdy] );
Slope_11 = Const_512 * ( s_CPot_z3[CID+Cdx-Cdy] - s_CPot_z3[CID-Cdx-Cdy] );
Slope_12 = Const_512 * ( s_CPot_z3[CID+Cdx+Cdy] - s_CPot_z3[CID-Cdx+Cdy] );
#if ( POT_GHOST_SIZE == 4 ) // upper plane
s_IntPot[FID ] = - Slope_00 - Slope_01 + Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx ] = + Slope_00 - Slope_01 + Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID +Fdy] = - Slope_00 + Slope_01 + Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx+Fdy] = + Slope_00 + Slope_01 + Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
#else // lower plane
s_IntPot[FID ] = - Slope_00 - Slope_01 - Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx ] = + Slope_00 - Slope_01 - Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID +Fdy] = - Slope_00 + Slope_01 - Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx+Fdy] = + Slope_00 + Slope_01 - Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
#endif
}
break; // INT_CQUAD
case INT_QUAD :
{
s_IntPot[FID ] = (real)0.0;
s_IntPot[FID+Fdx ] = (real)0.0;
s_IntPot[FID +Fdy] = (real)0.0;
s_IntPot[FID+Fdx+Fdy] = (real)0.0;
for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy );
for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx );
#if ( POT_GHOST_SIZE == 4 ) // upper plane
s_IntPot[FID ] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID +Fdy] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z1[CID+jj+ii] * c_Mp[0] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z2[CID+jj+ii] * c_Mp[1] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z3[CID+jj+ii] * c_Mp[2] * c_Mp[Idy] * c_Mp[Idx];
#else // lower plane
s_IntPot[FID ] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID +Fdy] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mp[Idy] * c_Mp[Idx];
#endif
}} // for di,dj
}
break; // INT_QUAD
} // switch ( IntScheme )
} // if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
__syncthreads();
// store the boundary potential into shared memory
ID1 = __umul24( 1+POT_USELESS+ty, POT_NXT_INT ) + 1+POT_USELESS+(tx<<1)+Disp2;
ID2 = (POT_NXT_F-1)*POT_NXT_F*POT_NXT_F/2 + __umul24( 1+ty, POT_NXT_F/2 ) + tx+Disp2;
s_FPot[ID2] = s_IntPot[ID1];
// store the boundary potential into registers
ID1 = __umul24( 1+POT_USELESS+ty, POT_NXT_INT ) + 1+POT_USELESS+(tx<<1)+Disp1;
BPot_xy2 = s_IntPot[ID1];
__syncthreads();
// for POT_USELESS == 0, no z plane is useless --> one more z plane (lower plane) to store
#if ( POT_GHOST_SIZE == 4 )
if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
{
switch ( IntScheme )
{
/*
case INT_CENTRAL :
{
s_IntPot[FID ] = s_CPot_z2[CID] - Slope_00 - Slope_01 - Slope_02;
s_IntPot[FID+Fdx ] = s_CPot_z2[CID] + Slope_00 - Slope_01 - Slope_02;
s_IntPot[FID +Fdy] = s_CPot_z2[CID] - Slope_00 + Slope_01 - Slope_02;
s_IntPot[FID+Fdx+Fdy] = s_CPot_z2[CID] + Slope_00 + Slope_01 - Slope_02;
}
break; // INT_CENTRAL
*/
case INT_CQUAD :
{
s_IntPot[FID ] = - Slope_00 - Slope_01 - Slope_02 - Slope_03 - Slope_04 - Slope_05 + Slope_06
+ Slope_07 + Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx ] = + Slope_00 - Slope_01 - Slope_02 + Slope_03 - Slope_04 + Slope_05 - Slope_06
- Slope_07 + Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID +Fdy] = - Slope_00 + Slope_01 - Slope_02 - Slope_03 + Slope_04 + Slope_05 - Slope_06
+ Slope_07 - Slope_08 + Slope_09 - Slope_10 - Slope_11 + Slope_12
+ s_CPot_z2[CID];
s_IntPot[FID+Fdx+Fdy] = + Slope_00 + Slope_01 - Slope_02 + Slope_03 + Slope_04 - Slope_05 + Slope_06
- Slope_07 - Slope_08 - Slope_09 + Slope_10 + Slope_11 - Slope_12
+ s_CPot_z2[CID];
}
break; // INT_CQUAD
case INT_QUAD :
{
s_IntPot[FID ] = (real)0.0;
s_IntPot[FID+Fdx ] = (real)0.0;
s_IntPot[FID +Fdy] = (real)0.0;
s_IntPot[FID+Fdx+Fdy] = (real)0.0;
for (int dj=-1; dj<=1; dj++) { Idy = dj+1; jj = __mul24( dj, Cdy );
for (int di=-1; di<=1; di++) { Idx = di+1; ii = __mul24( di, Cdx );
s_IntPot[FID ] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID ] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mm[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx ] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mm[Idy] * c_Mp[Idx];
s_IntPot[FID +Fdy] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID +Fdy] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mp[Idy] * c_Mm[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z1[CID+jj+ii] * c_Mm[0] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z2[CID+jj+ii] * c_Mm[1] * c_Mp[Idy] * c_Mp[Idx];
s_IntPot[FID+Fdx+Fdy] += s_CPot_z3[CID+jj+ii] * c_Mm[2] * c_Mp[Idy] * c_Mp[Idx];
}} // for di,dj
}
break; // INT_QUAD
} // switch ( IntScheme )
} // if ( ID0 < (POT_NXT-2)*(POT_NXT-2) )
__syncthreads();
// store the internal potential into shared memory
ID1 = __umul24( 1+ty, POT_NXT_INT ) + 1+(tx<<1)+Disp1;
ID2 = (POT_NXT_F-2)*POT_NXT_F*POT_NXT_F/2 + __umul24( 1+ty, POT_NXT_F/2) + tx+Disp1;
s_FPot[ID2] = s_IntPot[ID1];
// store the internal potential into registers
ID1 = __umul24( 1+ty, POT_NXT_INT ) + 1+(tx<<1)+Disp2;
RPot13 = s_IntPot[ID1];
// store the boundary potential into shared memory or registers
if ( ID0 >= 13*RHO_NXT/2 && ID0 < 14*RHO_NXT/2 )
{
ID3 = ID0 % (RHO_NXT/2);
// shared memory: -yz plane
ID1 = __umul24( 2+(ID3<<1), POT_NXT_INT );
ID2 = (POT_NXT_F-2)*POT_NXT_F*POT_NXT_F/2 + __umul24( 2+(ID3<<1), POT_NXT_F/2 );
s_FPot[ID2] = s_IntPot[ID1];
// shared memory: +yz plane
ID1 = __umul24( 1+(ID3<<1), POT_NXT_INT ) + POT_NXT_INT-1;
ID2 = (POT_NXT_F-2)*POT_NXT_F*POT_NXT_F/2 + __umul24( 1+(ID3<<1), POT_NXT_F/2 ) + POT_NXT_F/2-1;
s_FPot[ID2] = s_IntPot[ID1];
// shared memory: -xz plane
ID1 = 2+(ID3<<1);
ID2 = (POT_NXT_F-2)*POT_NXT_F*POT_NXT_F/2 + ID3+1;
s_FPot[ID2] = s_IntPot[ID1];
// shared memory: +xz plane
ID1 = (POT_NXT_INT-1)*POT_NXT_INT + 1+(ID3<<1);
ID2 = (POT_NXT_F-2)*POT_NXT_F*POT_NXT_F/2 + (POT_NXT_F-1)*POT_NXT_F/2 + ID3;
s_FPot[ID2] = s_IntPot[ID1];
// registers: -yz plane
ID1 = __umul24( 1+(ID3<<1), POT_NXT_INT );
BPot_yz1 = s_IntPot[ID1];
// registers: +yz plane
ID1 = __umul24( 2+(ID3<<1), POT_NXT_INT ) + POT_NXT_INT-1;
BPot_yz2 = s_IntPot[ID1];
// registers: -xz plane
ID1 = 1+(ID3<<1);
BPot_xz1 = s_IntPot[ID1];
// registers: +xz plane
ID1 = (POT_NXT_INT-1)*POT_NXT_INT + 2+(ID3<<1);
BPot_xz2 = s_IntPot[ID1];
} // if ( ID0 >= 13*RHO_NXT/2 && ID0 < 14*RHO_NXT/2 )
__syncthreads();
#endif // ( POT_GHOST_SIZE == 4 )
// b. use the SOR scheme to evaluate potential
// ---------------------------------------------------------------------------------------------------------
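// Sketch of the update applied to every interior cell below (assuming Omega_6 is the
// over-relaxation parameter divided by six and Const is the source-term constant defined
// earlier in the kernel; neither appears in this section):
//
//    Residual   = phi(i+1,j,k) + phi(i-1,j,k) + phi(i,j+1,k) + phi(i,j-1,k)
//               + phi(i,j,k+1) + phi(i,j,k-1) - 6*phi(i,j,k) - Const*rho(i,j,k);
//    phi(i,j,k) += Omega_6*Residual;
//
// i.e. over-relaxed Gauss-Seidel on the 7-point Laplacian. The two passes (pass = 0/1)
// sweep the two colors of a checkerboard (odd/even) ordering, which is also why part of the
// potential is kept in the RPot*/BPot_* registers and exchanged with s_FPot between the two
// passes (see section (b2)).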
Residual_Total_Old = __FLT_MAX__;
for (int Iter=0; Iter<Max_Iter; Iter++)
{
s_Residual_Total[ID0] = (real)0.0;
PotCen = PotCen0 + Disp1;
RhoCen = RhoCen0 + Disp1;
Disp4 = Disp1;
Disp5 = Disp2;
Disp6 = Disp3;
Disp8 = Disp7;
Disp9 = Disp2;
Disp10 = Disp13;
Disp11 = Disp1;
Disp14 = Disp8;
Disp15 = Disp9;
for (int pass=0; pass<2; pass++)
{
// (b1). evaluate residual, update potential
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// z = 0 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot0 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot0 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz + Disp6;
RhoCen += RHO_NXT*RHO_NXT + Disp6;
Disp14 = Disp10;
Disp15 = Disp11;
// z = 1 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot1 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot1 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz - Disp6;
RhoCen += RHO_NXT*RHO_NXT - Disp6;
Disp14 = Disp8;
Disp15 = Disp9;
// z = 2 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot2 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot2 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz + Disp6;
RhoCen += RHO_NXT*RHO_NXT + Disp6;
Disp14 = Disp10;
Disp15 = Disp11;
// z = 3 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot3 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot3 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz - Disp6;
RhoCen += RHO_NXT*RHO_NXT - Disp6;
Disp14 = Disp8;
Disp15 = Disp9;
// z = 4 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot4 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot4 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz + Disp6;
RhoCen += RHO_NXT*RHO_NXT + Disp6;
Disp14 = Disp10;
Disp15 = Disp11;
// z = 5 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot5 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot5 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz - Disp6;
RhoCen += RHO_NXT*RHO_NXT - Disp6;
Disp14 = Disp8;
Disp15 = Disp9;
// z = 6 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot6 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot6 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz + Disp6;
RhoCen += RHO_NXT*RHO_NXT + Disp6;
Disp14 = Disp10;
Disp15 = Disp11;
// z = 7 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot7 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot7 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz - Disp6;
RhoCen += RHO_NXT*RHO_NXT - Disp6;
Disp14 = Disp8;
Disp15 = Disp9;
// z = 8 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot8 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot8 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz + Disp6;
RhoCen += RHO_NXT*RHO_NXT + Disp6;
Disp14 = Disp10;
Disp15 = Disp11;
// z = 9 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot9 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot9 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz - Disp6;
RhoCen += RHO_NXT*RHO_NXT - Disp6;
Disp14 = Disp8;
Disp15 = Disp9;
// z = 10 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot10 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot10 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz + Disp6;
RhoCen += RHO_NXT*RHO_NXT + Disp6;
Disp14 = Disp10;
Disp15 = Disp11;
// z = 11 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot11 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot11 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz - Disp6;
RhoCen += RHO_NXT*RHO_NXT - Disp6;
Disp14 = Disp8;
Disp15 = Disp9;
// z = 12 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot12 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot12 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz + Disp6;
RhoCen += RHO_NXT*RHO_NXT + Disp6;
Disp14 = Disp10;
Disp15 = Disp11;
// z = 13 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot13 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot13 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
#if ( POT_GHOST_SIZE == 5 )
PotCen += dz - Disp6;
RhoCen += RHO_NXT*RHO_NXT - Disp6;
Disp14 = Disp8;
Disp15 = Disp9;
// z = 14 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot14 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot14 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
PotCen += dz + Disp6;
RhoCen += RHO_NXT*RHO_NXT + Disp6;
Disp14 = Disp10;
Disp15 = Disp11;
// z = 15 plane
// =======================================
// evaluate residual
Residual = ( s_FPot[kp] + s_FPot[km] + s_FPot[jp] + s_FPot[jm] + s_FPot[ip] + s_FPot[im]
- (real)6.0*RPot15 - Const*g_Rho_Array[bx][RhoCen] );
// update potential
RPot15 += Omega_6*Residual;
// save the absolute value of residual of each grid into a shared array for evaluating the sum
s_Residual_Total[ID0] += FABS( Residual );
#endif // #if ( POT_GHOST_SIZE == 5 )
__syncthreads();
// (b2). exchange the potential stored in the shared memory and registers
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
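// Only half of the potential fits into s_FPot, so after each half-sweep the data held in
// registers (BPot_* for the boundaries, RPot* for the interior) must trade places with the
// corresponding entries of s_FPot before the other color can be updated; that exchange is
// what (b2-1)-(b2-3) implement. The Temp/SYNCTHREADS pattern below avoids overwriting a
// shared-memory value before the receiving thread has read it.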
// (b2-1). exchange the boundary potential
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// -xy plane
// =======================================
SendID = __umul24(1+ty, POT_NXT_F/2) + tx+Disp5;
RecvID = __umul24(1+ty, POT_NXT_F/2) + tx+Disp4;
Temp = BPot_xy1;
BPot_xy1 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// +xy plane
// =======================================
SendID = (POT_NXT_F-1)*POT_NXT_F*POT_NXT_F/2 + __umul24(1+ty, POT_NXT_F/2) + tx+Disp4;
RecvID = (POT_NXT_F-1)*POT_NXT_F*POT_NXT_F/2 + __umul24(1+ty, POT_NXT_F/2) + tx+Disp5;
Temp = BPot_xy2;
BPot_xy2 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// -yz plane (boundary potential in the +-yz planes will be pasted after the exchange of internal potential)
// =======================================
RecvID = __umul24(1+ty, POT_NXT_F*POT_NXT_F/2) + __umul24(1+(tx<<1)+Disp4, POT_NXT_F/2);
Temp1 = BPot_yz1;
BPot_yz1 = s_FPot[RecvID];
// +yz plane
// =======================================
RecvID = __umul24(1+ty, POT_NXT_F*POT_NXT_F/2) + __umul24(1+(tx<<1)+Disp5, POT_NXT_F/2)
+ (POT_NXT_F/2)-1;
Temp2 = BPot_yz2;
BPot_yz2 = s_FPot[RecvID];
// -xz plane
// =======================================
SendID = __umul24(1+ty, POT_NXT_F*POT_NXT_F/2) + tx+Disp5;
RecvID = __umul24(1+ty, POT_NXT_F*POT_NXT_F/2) + tx+Disp4;
Temp = BPot_xz1;
BPot_xz1 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// +xz plane
// =======================================
SendID = __umul24(1+ty, POT_NXT_F*POT_NXT_F/2) + (POT_NXT_F-1)*POT_NXT_F/2 + tx+Disp4;
RecvID = __umul24(1+ty, POT_NXT_F*POT_NXT_F/2) + (POT_NXT_F-1)*POT_NXT_F/2 + tx+Disp5;
Temp = BPot_xz2;
BPot_xz2 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
__syncthreads();
// (b2-2). exchange the internal potential stored in shared memory and registers
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// z = 0 plane
// =======================================
SendID = PotCen0 + 0*dz + Disp4;
RecvID = PotCen0 + 0*dz + Disp5;
Temp = RPot0;
RPot0 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 1 plane
// =======================================
SendID = PotCen0 + 1*dz + Disp5;
RecvID = PotCen0 + 1*dz + Disp4;
Temp = RPot1;
RPot1 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 2 plane
// =======================================
SendID = PotCen0 + 2*dz + Disp4;
RecvID = PotCen0 + 2*dz + Disp5;
Temp = RPot2;
RPot2 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 3 plane
// =======================================
SendID = PotCen0 + 3*dz + Disp5;
RecvID = PotCen0 + 3*dz + Disp4;
Temp = RPot3;
RPot3 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 4 plane
// =======================================
SendID = PotCen0 + 4*dz + Disp4;
RecvID = PotCen0 + 4*dz + Disp5;
Temp = RPot4;
RPot4 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 5 plane
// =======================================
SendID = PotCen0 + 5*dz + Disp5;
RecvID = PotCen0 + 5*dz + Disp4;
Temp = RPot5;
RPot5 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 6 plane
// =======================================
SendID = PotCen0 + 6*dz + Disp4;
RecvID = PotCen0 + 6*dz + Disp5;
Temp = RPot6;
RPot6 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 7 plane
// =======================================
SendID = PotCen0 + 7*dz + Disp5;
RecvID = PotCen0 + 7*dz + Disp4;
Temp = RPot7;
RPot7 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 8 plane
// =======================================
SendID = PotCen0 + 8*dz + Disp4;
RecvID = PotCen0 + 8*dz + Disp5;
Temp = RPot8;
RPot8 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 9 plane
// =======================================
SendID = PotCen0 + 9*dz + Disp5;
RecvID = PotCen0 + 9*dz + Disp4;
Temp = RPot9;
RPot9 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 10 plane
// =======================================
SendID = PotCen0 + 10*dz + Disp4;
RecvID = PotCen0 + 10*dz + Disp5;
Temp = RPot10;
RPot10 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 11 plane
// =======================================
SendID = PotCen0 + 11*dz + Disp5;
RecvID = PotCen0 + 11*dz + Disp4;
Temp = RPot11;
RPot11 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 12 plane
// =======================================
SendID = PotCen0 + 12*dz + Disp4;
RecvID = PotCen0 + 12*dz + Disp5;
Temp = RPot12;
RPot12 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 13 plane
// =======================================
SendID = PotCen0 + 13*dz + Disp5;
RecvID = PotCen0 + 13*dz + Disp4;
Temp = RPot13;
RPot13 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
#if ( POT_GHOST_SIZE == 5 )
// z = 14 plane
// =======================================
SendID = PotCen0 + 14*dz + Disp4;
RecvID = PotCen0 + 14*dz + Disp5;
Temp = RPot14;
RPot14 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
// z = 15 plane
// =======================================
SendID = PotCen0 + 15*dz + Disp5;
RecvID = PotCen0 + 15*dz + Disp4;
Temp = RPot15;
RPot15 = s_FPot[RecvID];
SYNCTHREADS();
s_FPot[SendID] = Temp;
#endif
__syncthreads();
// (b2-3). copy the +-yz-plane boundary potential stored in the temporary registers
// back to shared memory
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// -yz plane
// =======================================
SendID = __umul24(1+ty, POT_NXT_F*POT_NXT_F/2) + __umul24(1+(tx<<1)+Disp5, POT_NXT_F/2);
s_FPot[SendID] = Temp1;
// +yz plane
// =======================================
SendID = __umul24(1+ty, POT_NXT_F*POT_NXT_F/2) + __umul24(1+(tx<<1)+Disp4, POT_NXT_F/2)
+ (POT_NXT_F/2)-1;
s_FPot[SendID] = Temp2;
__syncthreads();
// (b2-4). reset parameters for pass == 1 (the odd step)
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
PotCen = PotCen0 + Disp2;
RhoCen = RhoCen0 + Disp2;
Disp4 = Disp2;
Disp5 = Disp1;
Disp6 = Disp12;
Disp8 = Disp13;
Disp9 = Disp1;
Disp10 = Disp7;
Disp11 = Disp2;
} // for (int pass=0; pass<2; pass++)
// (b3). perform the reduction operation to get the sum of all residuals
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// fold the elements with indices >= FloorPow2 into the lower part of the array so that the number of remaining elements is a power of two
if ( ID0 < Remain ) s_Residual_Total[ID0] += s_Residual_Total[ ID0 + FloorPow2 ];
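// (For example, with POT_NTHREAD = 320 one would have FloorPow2 = 256 and Remain = 64,
//  assuming FloorPow2 and Remain are defined earlier as the largest power of two not
//  exceeding POT_NTHREAD and the leftover POT_NTHREAD - FloorPow2, respectively.)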
// parallel reduction
# if ( POT_NTHREAD >= 1024 )
# error : ERROR : POT_NTHREAD must be < 1024 !!
# endif
# if ( POT_NTHREAD >= 512 )
if ( ID0 < 256 ) s_Residual_Total[ID0] += s_Residual_Total[ ID0 + 256 ]; __syncthreads();
# endif
# if ( POT_NTHREAD >= 256 )
if ( ID0 < 128 ) s_Residual_Total[ID0] += s_Residual_Total[ ID0 + 128 ]; __syncthreads();
# endif
# if ( POT_NTHREAD >= 128 )
if ( ID0 < 64 ) s_Residual_Total[ID0] += s_Residual_Total[ ID0 + 64 ]; __syncthreads();
# endif
// adopt the warp-synchronous mechanism
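// NOTE: this relies on implicit warp-synchronous execution, which is only guaranteed on
//       pre-Volta GPUs; on sm_70 and later an explicit __syncwarp() between the steps
//       (or a __shfl_down_sync()-based reduction) would be required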
if ( ID0 < 32 )
{
// declare volatile pointer to ensure that the operations are not reordered
volatile real *s_Sum = s_Residual_Total;
s_Sum[ID0] += s_Sum[ID0+32]; // here we have assumed that POT_NTHREAD >= 64
s_Sum[ID0] += s_Sum[ID0+16];
s_Sum[ID0] += s_Sum[ID0+ 8];
s_Sum[ID0] += s_Sum[ID0+ 4];
s_Sum[ID0] += s_Sum[ID0+ 2];
s_Sum[ID0] += s_Sum[ID0+ 1];
}
__syncthreads();
// (b4). termination criterion
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
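// terminate once the summed residual exceeds that of the previous iteration (i.e. it stops
// improving), but only after at least Min_Iter iterations; Residual_Total_Old stores the
// previous total for this comparison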
if ( Iter+1 >= Min_Iter && s_Residual_Total[0] > Residual_Total_Old ) break;
Residual_Total_Old = s_Residual_Total[0];
__syncthreads();
} // for (int Iter=0; Iter<Max_Iter; Iter++)
// c. store potential back to the global memory
// ---------------------------------------------------------------------------------------------------------
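// At this point the converged potential is split between s_FPot (one checkerboard color of
// the cells plus the boundaries) and the RPot* registers (the interior cells of the other
// color): (c1) writes out the shared-memory part directly, while (c2) first copies the
// register part back into s_FPot and then writes it out in the same slice-by-slice fashion.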
// (c1). internal potential stored in shared memory --> global memory (store one z-slice at a time)
const uint y = ( ID0 % (GRA_NXT*GRA_NXT/2) ) / (GRA_NXT/2);
const uint x = ( ID0 % (GRA_NXT/2) );
if ( ID0 < GRA_NXT*GRA_NXT/2 )
{
for (uint z=0; z<GRA_NXT; z++)
{
Disp4 = (y+z)&1;
ID1 = __umul24( z, GRA_NXT*GRA_NXT ) + __umul24( y, GRA_NXT )
+ 2*x+Disp4-2*(POT_USELESS2&Disp4)+POT_USELESS2;
ID2 = __umul24( z+POT_GHOST_SIZE-GRA_GHOST_SIZE, POT_NXT_F*POT_NXT_F/2 )
+ __umul24( y+POT_GHOST_SIZE-GRA_GHOST_SIZE, POT_NXT_F/2 )
+ x+(POT_GHOST_SIZE-GRA_GHOST_SIZE)/2+POT_USELESS2-(POT_USELESS2&Disp4);
g_Pot_Array_Out[bx][ID1] = s_FPot[ID2];
}
}
__syncthreads();
// (c2). internal potential stored in the registers --> global memory
#if ( POT_GHOST_SIZE == 4 )
if ( ty >= POT_GHOST_SIZE-GRA_GHOST_SIZE-1 && ty <= POT_GHOST_SIZE+GRA_NXT-2 )
{
# if ( GRA_GHOST_SIZE == 2 )
s_FPot[ PotCen0 + 1*dz ] = RPot1;
# endif
# if ( GRA_GHOST_SIZE >= 1 )
s_FPot[ PotCen0 + 2*dz ] = RPot2;
# endif
s_FPot[ PotCen0 + 3*dz ] = RPot3;
s_FPot[ PotCen0 + 4*dz ] = RPot4;
s_FPot[ PotCen0 + 5*dz ] = RPot5;
s_FPot[ PotCen0 + 6*dz ] = RPot6;
s_FPot[ PotCen0 + 7*dz ] = RPot7;
s_FPot[ PotCen0 + 8*dz ] = RPot8;
s_FPot[ PotCen0 + 9*dz ] = RPot9;
s_FPot[ PotCen0 + 10*dz ] = RPot10;
# if ( GRA_GHOST_SIZE >= 1 )
s_FPot[ PotCen0 + 11*dz ] = RPot11;
# endif
# if ( GRA_GHOST_SIZE == 2 )
s_FPot[ PotCen0 + 12*dz ] = RPot12;
# endif
}
__syncthreads();
if ( ID0 < GRA_NXT*GRA_NXT/2 )
{
for (uint z=0; z<GRA_NXT; z++)
{
Disp4 = (y+z)&1;
ID1 = __umul24( z, GRA_NXT*GRA_NXT ) + __umul24( y, GRA_NXT )
// + 2*x+(Disp4^(GRA_GHOST_SIZE/2));
+ 2*x + ( Disp4 ^ ( 1-(GRA_GHOST_SIZE&1) ) );
ID2 = __umul24( z+POT_GHOST_SIZE-GRA_GHOST_SIZE, POT_NXT_F*POT_NXT_F/2 )
+ __umul24( y+POT_GHOST_SIZE-GRA_GHOST_SIZE, POT_NXT_F/2 )
// + x+1-(Disp4&(GRA_GHOST_SIZE/2));
+ x + 2 - (GRA_GHOST_SIZE+1)/2 - ( Disp4 & (1-(GRA_GHOST_SIZE&1)) );
g_Pot_Array_Out[bx][ID1] = s_FPot[ID2];
}
}
__syncthreads();
#else // #if ( POT_GHOST_SIZE == 4 )
if ( ty >= POT_GHOST_SIZE-GRA_GHOST_SIZE-1 && ty <= POT_GHOST_SIZE+GRA_NXT-2 )
{
# if ( GRA_GHOST_SIZE == 2 )
s_FPot[ PotCen0 + 2*dz ] = RPot2;
# endif
# if ( GRA_GHOST_SIZE >= 1 )
s_FPot[ PotCen0 + 3*dz ] = RPot3;
# endif
s_FPot[ PotCen0 + 4*dz ] = RPot4;
s_FPot[ PotCen0 + 5*dz ] = RPot5;
s_FPot[ PotCen0 + 6*dz ] = RPot6;
s_FPot[ PotCen0 + 7*dz ] = RPot7;
s_FPot[ PotCen0 + 8*dz ] = RPot8;
s_FPot[ PotCen0 + 9*dz ] = RPot9;
s_FPot[ PotCen0 + 10*dz ] = RPot10;
s_FPot[ PotCen0 + 11*dz ] = RPot11;
# if ( GRA_GHOST_SIZE >= 1 )
s_FPot[ PotCen0 + 12*dz ] = RPot12;
# endif
# if ( GRA_GHOST_SIZE == 2 )
s_FPot[ PotCen0 + 13*dz ] = RPot13;
# endif
}
__syncthreads();
if ( ID0 < GRA_NXT*GRA_NXT/2 )
{
for (uint z=0; z<GRA_NXT; z++)
{
Disp4 = (y+z)&1;
ID1 = __umul24( z, GRA_NXT*GRA_NXT ) + __umul24( y, GRA_NXT )
// + 2*x+( (Disp4^1) ^ (GRA_GHOST_SIZE/2) );
+ 2*x + ( (Disp4^1) ^ ( 1-(GRA_GHOST_SIZE&1) ) );
ID2 = __umul24( z+POT_GHOST_SIZE-GRA_GHOST_SIZE, POT_NXT_F*POT_NXT_F/2 )
+ __umul24( y+POT_GHOST_SIZE-GRA_GHOST_SIZE, POT_NXT_F/2 )
// + x+1+( (Disp4^1) & (GRA_GHOST_SIZE&1) );
+ x + 2 - (GRA_GHOST_SIZE+1)/2 + ( (Disp4^1) & (GRA_GHOST_SIZE&1) );
g_Pot_Array_Out[bx][ID1] = s_FPot[ID2];
}
}
#endif // #if ( POT_GHOST_SIZE == 4 ) ... else ...
} // FUNCTION : CUPOT_PoissonSolver_SOR_16to18cube
#endif // #if ( defined GRAVITY && defined GPU && POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 )
#pragma once
#include <cuda_runtime.h>
#include <math_constants.h>
#include "libvis/cuda/cuda_buffer.cuh"
#include "libvis/logging.h"
namespace vis {
// Helper for point projection using the "pixel corner" origin convention, in CUDA code.
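// ("Pixel corner" convention: integer coordinates lie on pixel corners, so the center of
//  pixel (i, j) is at (i + 0.5, j + 0.5).)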
struct PixelCornerProjector_ {
PixelCornerProjector_() = default;
// Host-only copy (should not run on device since it would be inefficient)
__host__ PixelCornerProjector_(const PixelCornerProjector_& other)
: resolution_x(other.resolution_x),
resolution_y(other.resolution_y),
min_nx(other.min_nx),
min_ny(other.min_ny),
max_nx(other.max_nx),
max_ny(other.max_ny),
grid2(other.grid2),
grid3(other.grid3),
omega(other.omega),
two_tan_omega_half(other.two_tan_omega_half),
fx(other.fx), fy(other.fy), cx(other.cx), cy(other.cy),
k1(other.k1), k2(other.k2), k3(other.k3), k4(other.k4),
p1(other.p1), p2(other.p2), sx1(other.sx1), sy1(other.sy1),
type(other.type), width(other.width), height(other.height) {}
// __forceinline__ __device__ float2 CubicHermiteSpline(
// const float2& p0,
// const float2& p1,
// const float2& p2,
// const float2& p3,
// const float x) const {
// const float2 a = make_float2(
// static_cast<float>(0.5) * (-p0.x + static_cast<float>(3.0) * p1.x - static_cast<float>(3.0) * p2.x + p3.x),
// static_cast<float>(0.5) * (-p0.y + static_cast<float>(3.0) * p1.y - static_cast<float>(3.0) * p2.y + p3.y));
// const float2 b = make_float2(
// static_cast<float>(0.5) * (static_cast<float>(2.0) * p0.x - static_cast<float>(5.0) * p1.x + static_cast<float>(4.0) * p2.x - p3.x),
// static_cast<float>(0.5) * (static_cast<float>(2.0) * p0.y - static_cast<float>(5.0) * p1.y + static_cast<float>(4.0) * p2.y - p3.y));
// const float2 c = make_float2(
// static_cast<float>(0.5) * (-p0.x + p2.x),
// static_cast<float>(0.5) * (-p0.y + p2.y));
// const float2 d = p1;
//
// // Use Horner's rule to evaluate the function value and its
// // derivative.
//
// // f = ax^3 + bx^2 + cx + d
// return make_float2(
// d.x + x * (c.x + x * (b.x + x * a.x)),
// d.y + x * (c.y + x * (b.y + x * a.y)));
// }
// opcount = 486
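// Evaluates the uniform cubic B-spline surface defined by the 4x4 control-point patch p at
// (frac_x, frac_y), writes the normalized (unit-length) interpolated direction to *result,
// and returns the 3x2 Jacobian of that unit direction with respect to (frac_x, frac_y) in
// the dresult_dxy_* outputs. The body appears to be machine-generated common-subexpression
// code, hence the opaque termNN names.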
template <typename Scalar>
__forceinline__ __device__ void CentralGenericBSpline_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian(
Scalar frac_x, Scalar frac_y, float3 p[4][4], float3* result,
float* dresult_dxy_0_0, float* dresult_dxy_0_1,
float* dresult_dxy_1_0, float* dresult_dxy_1_1,
float* dresult_dxy_2_0, float* dresult_dxy_2_1) const {
const Scalar term0 = 0.166666666666667f*frac_y;
const Scalar term1 = -term0 + 0.666666666666667f;
const Scalar term2 = (frac_y - 4) * (frac_y - 4);
const Scalar term3 = (frac_x - 4) * (frac_x - 4);
const Scalar term4 = 0.166666666666667f*frac_x;
const Scalar term5 = -term4 + 0.666666666666667f;
const Scalar term6 = p[0][0].x*term5;
const Scalar term7 = (frac_x - 3) * (frac_x - 3);
const Scalar term8 = term4 - 0.5f;
const Scalar term9 = p[0][3].x*term8;
const Scalar term10 = frac_x * frac_x;
const Scalar term11 = 0.5*frac_x*term10;
const Scalar term12 = 19.5f*frac_x - 5.5*term10 + term11 - 21.8333333333333f;
const Scalar term13 = -16*frac_x + 5*term10 - term11 + 16.6666666666667f;
const Scalar term14 = p[0][1].x*term12 + p[0][2].x*term13 + term3*term6 + term7*term9;
const Scalar term15 = term14*term2;
const Scalar term16 = term1*term15;
const Scalar term17 = term0 - 0.5f;
const Scalar term18 = (frac_y - 3) * (frac_y - 3);
const Scalar term19 = p[3][0].x*term5;
const Scalar term20 = p[3][3].x*term8;
const Scalar term21 = p[3][1].x*term12 + p[3][2].x*term13 + term19*term3 + term20*term7;
const Scalar term22 = term18*term21;
const Scalar term23 = term17*term22;
const Scalar term24 = frac_y * frac_y;
const Scalar term25 = 0.5f*frac_y*term24;
const Scalar term26 = -16*frac_y + 5*term24 - term25 + 16.6666666666667f;
const Scalar term27 = p[2][0].x*term5;
const Scalar term28 = p[2][3].x*term8;
const Scalar term29 = p[2][1].x*term12 + p[2][2].x*term13 + term27*term3 + term28*term7;
const Scalar term30 = term26*term29;
const Scalar term31 = 19.5f*frac_y - 5.5f*term24 + term25 - 21.8333333333333f;
const Scalar term32 = p[1][0].x*term5;
const Scalar term33 = p[1][3].x*term8;
const Scalar term34 = p[1][1].x*term12 + p[1][2].x*term13 + term3*term32 + term33*term7;
const Scalar term35 = term31*term34;
const Scalar term36 = term16 + term23 + term30 + term35;
const Scalar term37 = p[0][0].y*term5;
const Scalar term38 = p[0][3].y*term8;
const Scalar term39 = p[0][1].y*term12 + p[0][2].y*term13 + term3*term37 + term38*term7;
const Scalar term40 = term2*term39;
const Scalar term41 = term1*term40;
const Scalar term42 = p[3][0].y*term5;
const Scalar term43 = p[3][3].y*term8;
const Scalar term44 = p[3][1].y*term12 + p[3][2].y*term13 + term3*term42 + term43*term7;
const Scalar term45 = term18*term44;
const Scalar term46 = term17*term45;
const Scalar term47 = p[2][0].y*term5;
const Scalar term48 = p[2][3].y*term8;
const Scalar term49 = p[2][1].y*term12 + p[2][2].y*term13 + term3*term47 + term48*term7;
const Scalar term50 = term26*term49;
const Scalar term51 = p[1][0].y*term5;
const Scalar term52 = p[1][3].y*term8;
const Scalar term53 = p[1][1].y*term12 + p[1][2].y*term13 + term3*term51 + term52*term7;
const Scalar term54 = term31*term53;
const Scalar term55 = term41 + term46 + term50 + term54;
const Scalar term56 = p[0][0].z*term5;
const Scalar term57 = p[0][3].z*term8;
const Scalar term58 = p[0][1].z*term12 + p[0][2].z*term13 + term3*term56 + term57*term7;
const Scalar term59 = term2*term58;
const Scalar term60 = term1*term59;
const Scalar term61 = p[3][0].z*term5;
const Scalar term62 = p[3][3].z*term8;
const Scalar term63 = p[3][1].z*term12 + p[3][2].z*term13 + term3*term61 + term62*term7;
const Scalar term64 = term18*term63;
const Scalar term65 = term17*term64;
const Scalar term66 = p[2][0].z*term5;
const Scalar term67 = p[2][3].z*term8;
const Scalar term68 = p[2][1].z*term12 + p[2][2].z*term13 + term3*term66 + term67*term7;
const Scalar term69 = term26*term68;
const Scalar term70 = p[1][0].z*term5;
const Scalar term71 = p[1][3].z*term8;
const Scalar term72 = p[1][1].z*term12 + p[1][2].z*term13 + term3*term70 + term7*term71;
const Scalar term73 = term31*term72;
const Scalar term74 = term60 + term65 + term69 + term73;
const Scalar term75 = (term36 * term36) + (term55 * term55) + (term74 * term74);
const Scalar term76 = 1.f / sqrt(term75);
const Scalar term77 = term1*term2;
const Scalar term78 = 0.166666666666667f*term3;
const Scalar term79 = 0.166666666666667f*term7;
const Scalar term80 = 1.5f*term10;
const Scalar term81 = -11.0f*frac_x + term80 + 19.5f;
const Scalar term82 = 10*frac_x - term80 - 16;
const Scalar term83 = 2*frac_x;
const Scalar term84 = term83 - 8;
const Scalar term85 = term83 - 6;
const Scalar term86 = term17*term18;
const Scalar term87 = term26*(-p[2][0].x*term78 + p[2][1].x*term81 + p[2][2].x*term82 + p[2][3].x*term79 + term27*term84 + term28*term85) + term31*(-p[1][0].x*term78 + p[1][1].x*term81 + p[1][2].x*term82 + p[1][3].x*term79 + term32*term84 + term33*term85) + term77*(-p[0][0].x*term78 + p[0][1].x*term81 + p[0][2].x*term82 + p[0][3].x*term79 + term6*term84 + term85*term9) + term86*(-p[3][0].x*term78 + p[3][1].x*term81 + p[3][2].x*term82 + p[3][3].x*term79 + term19*term84 + term20*term85);
const Scalar term88b = 1.f / sqrt(term75);
const Scalar term88 = term88b * term88b * term88b;
const Scalar term89 = (1.0f/2.0f)*term16 + (1.0f/2.0f)*term23 + (1.0f/2.0f)*term30 + (1.0f/2.0f)*term35;
const Scalar term90 = (1.0f/2.0f)*term41 + (1.0f/2.0f)*term46 + (1.0f/2.0f)*term50 + (1.0f/2.0f)*term54;
const Scalar term91 = term26*(-p[2][0].y*term78 + p[2][1].y*term81 + p[2][2].y*term82 + p[2][3].y*term79 + term47*term84 + term48*term85) + term31*(-p[1][0].y*term78 + p[1][1].y*term81 + p[1][2].y*term82 + p[1][3].y*term79 + term51*term84 + term52*term85) + term77*(-p[0][0].y*term78 + p[0][1].y*term81 + p[0][2].y*term82 + p[0][3].y*term79 + term37*term84 + term38*term85) + term86*(-p[3][0].y*term78 + p[3][1].y*term81 + p[3][2].y*term82 + p[3][3].y*term79 + term42*term84 + term43*term85);
const Scalar term92 = (1.0f/2.0f)*term60 + (1.0f/2.0f)*term65 + (1.0f/2.0f)*term69 + (1.0f/2.0f)*term73;
const Scalar term93 = term26*(-p[2][0].z*term78 + p[2][1].z*term81 + p[2][2].z*term82 + p[2][3].z*term79 + term66*term84 + term67*term85) + term31*(-p[1][0].z*term78 + p[1][1].z*term81 + p[1][2].z*term82 + p[1][3].z*term79 + term70*term84 + term71*term85) + term77*(-p[0][0].z*term78 + p[0][1].z*term81 + p[0][2].z*term82 + p[0][3].z*term79 + term56*term84 + term57*term85) + term86*(-p[3][0].z*term78 + p[3][1].z*term81 + p[3][2].z*term82 + p[3][3].z*term79 + term61*term84 + term62*term85);
const Scalar term94 = 2*term88*(term87*term89 + term90*term91 + term92*term93);
const Scalar term95 = 1.5f*term24;
const Scalar term96 = 10*frac_y - term95 - 16;
const Scalar term97 = term29*term96;
const Scalar term98 = -11.0f*frac_y + term95 + 19.5f;
const Scalar term99 = term34*term98;
const Scalar term100 = 2*frac_y;
const Scalar term101 = term1*(term100 - 8);
const Scalar term102 = term101*term14;
const Scalar term103 = term17*(term100 - 6);
const Scalar term104 = term103*term21;
const Scalar term105 = term49*term96;
const Scalar term106 = term53*term98;
const Scalar term107 = term101*term39;
const Scalar term108 = term103*term44;
const Scalar term109 = term68*term96;
const Scalar term110 = term72*term98;
const Scalar term111 = term101*term58;
const Scalar term112 = term103*term63;
const Scalar term113 = term88*(term89*(2*term102 + 2*term104 - 0.333333333333333f*term15 + 0.333333333333333f*term22 + 2*term97 + 2*term99) + term90*(2*term105 + 2*term106 + 2*term107 + 2*term108 - 0.333333333333333f*term40 + 0.333333333333333f*term45) + term92*(2*term109 + 2*term110 + 2*term111 + 2*term112 - 0.333333333333333f*term59 + 0.333333333333333f*term64));
(*result).x = term36*term76;
(*result).y = term55*term76;
(*result).z = term74*term76;
*dresult_dxy_0_0 = -term36*term94 + term76*term87;
*dresult_dxy_0_1 = -term113*term36 + term76*(term102 + term104 - 0.166666666666667f*term15 + 0.166666666666667f*term22 + term97 + term99);
*dresult_dxy_1_0 = -term55*term94 + term76*term91;
*dresult_dxy_1_1 = -term113*term55 + term76*(term105 + term106 + term107 + term108 - 0.166666666666667f*term40 + 0.166666666666667f*term45);
*dresult_dxy_2_0 = -term74*term94 + term76*term93;
*dresult_dxy_2_1 = -term113*term74 + term76*(term109 + term110 + term111 + term112 - 0.166666666666667f*term59 + 0.166666666666667f*term64);
}
__forceinline__ __device__ bool IsInCalibratedImageArea(float x, float y) const {
return x >= min_nx && y >= min_ny &&
x < max_nx + 1 && y < max_ny + 1;
}
/// Inverse of GridPointToPixelCornerConv().
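/// Maps x in [min_nx, max_nx + 1] linearly onto [1, grid3.width() - 2] (and analogously for
/// y), i.e. into the interior of the B-spline control grid.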
__forceinline__ __device__ float2 PixelCornerConvToGridPoint(float x, float y) const {
return make_float2(
1.f + (grid3.width() - 3.f) * (x - min_nx) / (max_nx + 1 - min_nx),
1.f + (grid3.height() - 3.f) * (y - min_ny) / (max_ny + 1 - min_ny));
}
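// PixelScaleToGridScaleX/Y() multiply a length by the constant slope of the pixel-to-grid
// mapping above; they are used below to convert the spline Jacobian from grid units back to
// pixel units via the chain rule.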
__forceinline__ __device__ float PixelScaleToGridScaleX(float length) const {
return length * ((grid3.width() - 3.f) / (max_nx + 1 - min_nx));
}
__forceinline__ __device__ float PixelScaleToGridScaleY(float length) const {
return length * ((grid3.height() - 3.f) / (max_ny + 1 - min_ny));
}
__forceinline__ __device__ bool UnprojectFromPixelCornerConvWithJacobian(
float x, float y, float3* result,
float* dresult_dxy_0_0, float* dresult_dxy_0_1,
float* dresult_dxy_1_0, float* dresult_dxy_1_1,
float* dresult_dxy_2_0, float* dresult_dxy_2_1) const {
if (!IsInCalibratedImageArea(x, y)) {
return false;
}
float2 grid_point = PixelCornerConvToGridPoint(x, y);
grid_point.x += 2;
grid_point.y += 2;
int ix = ::floor(grid_point.x);
int iy = ::floor(grid_point.y);
float frac_x = grid_point.x - (ix - 3);
float frac_y = grid_point.y - (iy - 3);
float3 p[4][4];
for (int y = 0; y < 4; ++ y) {
for (int x = 0; x < 4; ++ x) {
p[y][x] = grid3(iy - 3 + y, ix - 3 + x);
}
}
CentralGenericBSpline_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian(frac_x, frac_y, p, result, dresult_dxy_0_0, dresult_dxy_0_1, dresult_dxy_1_0, dresult_dxy_1_1, dresult_dxy_2_0, dresult_dxy_2_1);
*dresult_dxy_0_0 = PixelScaleToGridScaleX(*dresult_dxy_0_0);
*dresult_dxy_0_1 = PixelScaleToGridScaleY(*dresult_dxy_0_1);
*dresult_dxy_1_0 = PixelScaleToGridScaleX(*dresult_dxy_1_0);
*dresult_dxy_1_1 = PixelScaleToGridScaleY(*dresult_dxy_1_1);
*dresult_dxy_2_0 = PixelScaleToGridScaleX(*dresult_dxy_2_0);
*dresult_dxy_2_1 = PixelScaleToGridScaleY(*dresult_dxy_2_1);
return true;
}
// Assumes that position.z > 0.
__forceinline__ __device__ float2 Project(float3 position) const {
// Pinhole camera. NOTE: Commented out for shorter compile times. Find a better solution.
// // if (type == Camera::Type::kPinholeCamera4f) {
// return make_float2(fx * (position.x / position.z) + cx,
// fy * (position.y / position.z) + cy);
// // }
// RadtanCamera8d. NOTE: Commented out for shorter compile times. Find a better solution.
// // if (type == Camera::Type::kRadtanCamera8d) {
// float2 undistorted_point = make_float2(position.x / position.z,
// position.y / position.z);
// const float mx2_u = undistorted_point.x * undistorted_point.x;
// const float my2_u = undistorted_point.y * undistorted_point.y;
// const float mxy_u = undistorted_point.x * undistorted_point.y;
// const float rho2_u = mx2_u + my2_u;
// const float rad_dist_u = k1 * rho2_u + k2 * rho2_u * rho2_u;
// float2 distorted_point = make_float2(undistorted_point.x + undistorted_point.x * rad_dist_u + 2.0f * p1 * mxy_u + p2 * (rho2_u + 2.0f * mx2_u),
// undistorted_point.y + undistorted_point.y * rad_dist_u + 2.0f * p2 * mxy_u + p1 * (rho2_u + 2.0f * my2_u));
// return make_float2(fx * distorted_point.x + cx,
// fy * distorted_point.y + cy);
// // }
// -------------------------------------------------------------------------
// FovCamera5f.
// if (type == 3 /*Camera::Type::FovCamera5f*/) {
// float2 nxy = make_float2(position.x / position.z,
// position.y / position.z);
//
// const float r = sqrtf(nxy.x * nxy.x + nxy.y * nxy.y);
// const float kEpsilon = static_cast<float>(1e-6);
// const float factor =
// (r < kEpsilon) ?
// 1.f :
// (atanf(r * two_tan_omega_half) / (r * omega));
// return make_float2(fx * factor * nxy.x + cx,
// fy * factor * nxy.y + cy);
// -------------------------------------------------------------------------
// ThinPrismFisheyeCamera12d.
// } else if (type == 3 /*Camera::Type::kThinPrismFisheyeCamera12d*/) {
// float2 undistorted_nxy = make_float2(position.x / position.z,
// position.y / position.z);
//
// float r = sqrtf(undistorted_nxy.x * undistorted_nxy.x + undistorted_nxy.y * undistorted_nxy.y);
//
// // if (r > radius_cutoff_) {
// // return Eigen::Vector2f((undistorted_nxy.x < 0) ? -100 : 100,
// // (undistorted_nxy.y < 0) ? -100 : 100);
// // }
//
// float fisheye_x, fisheye_y;
// const float kEpsilon = static_cast<float>(1e-6);
// if (r > kEpsilon) {
// float theta_by_r = atanf(r) / r;
// fisheye_x = theta_by_r * undistorted_nxy.x;
// fisheye_y = theta_by_r * undistorted_nxy.y;
// } else {
// fisheye_x = undistorted_nxy.x;
// fisheye_y = undistorted_nxy.y;
// }
//
// const float x2 = fisheye_x * fisheye_x;
// const float xy = fisheye_x * fisheye_y;
// const float y2 = fisheye_y * fisheye_y;
// const float r2 = x2 + y2;
// const float r4 = r2 * r2;
// const float r6 = r4 * r2;
// const float r8 = r6 * r2;
//
// const float radial =
// k1 * r2 + k2 * r4 + k3 * r6 + k4 * r8;
// const float dx = static_cast<float>(2) * p1 * xy + p2 * (r2 + static_cast<float>(2) * x2) + sx1 * r2;
// const float dy = static_cast<float>(2) * p2 * xy + p1 * (r2 + static_cast<float>(2) * y2) + sy1 * r2;
//
// float nx = fisheye_x + radial * fisheye_x + dx;
// float ny = fisheye_y + radial * fisheye_y + dy;
//
// return make_float2(fx * nx + cx,
// fy * ny + cy);
// } else if (type == 0 /*Camera::Type::kInvalid*/) {
// -----------------------------------------------------------------------
// TODO: HACK for the CentralGenericBSplineModel from the camera_calibration project.
// There should instead be a sane possibility for passing in external projection models.
// NOTE: The special case of ||position|| == 0 is not handled here,
// since the resulting NaN/Inf should not lead to the position being
// projected anyway.
float length = sqrtf(position.x * position.x + position.y * position.y + position.z * position.z);
float3 point_direction = make_float3(position.x / length, position.y / length, position.z / length);
// Define initial estimate
float2 result = make_float2(0.5f * (min_nx + max_nx + 1),
0.5f * (min_ny + max_ny + 1));
// Gauss-Newton optimization algorithm.
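// Each iteration below: unproject the current pixel estimate to a unit direction d with
// Jacobian J = d(direction)/d(pixel) (3x2), form the residual r = d - point_direction, and
// take a Gauss-Newton step by solving the damped 2x2 normal equations
//
//    (J^T J + kDiagEpsilon*I) * delta = J^T r
//
// with an in-place Cholesky factorization, then update result -= delta.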
constexpr float kEpsilon = 1e-10f; // NOTE: This threshold has been increased compared to the CPU version, which uses 1e-12f.
const usize kMaxIterations = 100;
bool left_calibrated_area_before = false;
(void) left_calibrated_area_before;
bool converged = false;
for (usize i = 0; i < kMaxIterations; ++i) {
float ddxy_dxy_0_0;
float ddxy_dxy_0_1;
float ddxy_dxy_1_0;
float ddxy_dxy_1_1;
float ddxy_dxy_2_0;
float ddxy_dxy_2_1;
float3 direction;
UnprojectFromPixelCornerConvWithJacobian(result.x, result.y, &direction, &ddxy_dxy_0_0, &ddxy_dxy_0_1, &ddxy_dxy_1_0, &ddxy_dxy_1_1, &ddxy_dxy_2_0, &ddxy_dxy_2_1);
// (Non-squared) residuals.
float dx = direction.x - point_direction.x;
float dy = direction.y - point_direction.y;
float dz = direction.z - point_direction.z;
// Accumulate H and b.
float H_0_0 = ddxy_dxy_0_0 * ddxy_dxy_0_0 + ddxy_dxy_1_0 * ddxy_dxy_1_0 + ddxy_dxy_2_0 * ddxy_dxy_2_0;
float H_1_0_and_0_1 = ddxy_dxy_0_0 * ddxy_dxy_0_1 + ddxy_dxy_1_0 * ddxy_dxy_1_1 + ddxy_dxy_2_0 * ddxy_dxy_2_1;
float H_1_1 = ddxy_dxy_0_1 * ddxy_dxy_0_1 + ddxy_dxy_1_1 * ddxy_dxy_1_1 + ddxy_dxy_2_1 * ddxy_dxy_2_1;
float b_0 = dx * ddxy_dxy_0_0 + dy * ddxy_dxy_1_0 + dz * ddxy_dxy_2_0;
float b_1 = dx * ddxy_dxy_0_1 + dy * ddxy_dxy_1_1 + dz * ddxy_dxy_2_1;
// Solve the system and update the parameters.
// Make sure that the matrix is positive definite
// (instead of only semi-positive definite).
constexpr float kDiagEpsilon = 1e-6f;
H_0_0 += kDiagEpsilon;
H_1_1 += kDiagEpsilon;
// Perform in-place Cholesky decomposition of H
H_0_0 = sqrtf(H_0_0);
H_1_0_and_0_1 = H_1_0_and_0_1 / H_0_0;
H_1_1 = sqrtf(H_1_1 - H_1_0_and_0_1 * H_1_0_and_0_1);
      // Solve H * x = b for x (a standalone host-side sketch of this 2x2 solve
      // follows after this struct).
//
// (H_0_0 0) (H_0_0 H_0_1) (x0) (b0)
// (H_1_0 H_1_1) * ( 0 H_1_1) * (x1) = (b1)
//
// Naming the result of the second multiplication y, we get:
//
// (H_0_0 0) (y0) (b0)
// (H_1_0 H_1_1) * (y1) = (b1)
//
// and:
//
// (H_0_0 H_0_1) * (x0) = (y0)
// ( 0 H_1_1) (x1) = (y1)
float y_0 = b_0 / H_0_0;
float y_1 = (b_1 - H_1_0_and_0_1 * y_0) / H_1_1;
float x_1 = y_1 / H_1_1;
float x_0 = (y_0 - H_1_0_and_0_1 * x_1) / H_0_0;
result.x -= x_0;
result.y -= x_1;
      // Check whether the estimated projection has left the calibrated image
      // area. This check should catch NaNs as well. We do not give up (and
      // return the invalid-projection marker) immediately when this happens,
      // but only if it happens for two iterations in a row. This is because
      // the Gauss-Newton step may overestimate the step size and thus leave
      // the image area slightly for points that project close to the border
      // of the image.
if (!IsInCalibratedImageArea(result.x, result.y)) {
#ifdef __CUDA_ARCH__
if (left_calibrated_area_before || ::isnan(result.x)) {
return make_float2(-99999, -99999);
}
#else
LOG(FATAL) << "Must never be called.";
#endif
left_calibrated_area_before = true;
// Clamp projection back into the calibrated area for the next step.
// The #ifdef avoids trouble with CUDA's min/max apparently not being
// visible outside of nvcc.
#ifdef __CUDA_ARCH__
result = make_float2(
::min(max_nx + 0.999f, ::max(result.x, static_cast<float>(min_nx))),
::min(max_ny + 0.999f, ::max(result.y, static_cast<float>(min_ny))));
#else
LOG(FATAL) << "Must never be called.";
#endif
} else {
left_calibrated_area_before = false;
if (dx * dx + dy * dy + dz * dz < kEpsilon) {
converged = true;
break;
}
}
}
return converged ? result : make_float2(-99999, -99999);
// } // -------------------------------------------------------------------
// kNonParametricBicubicProjectionCamerad. NOTE: Commented out for shorter compile times. Find a better solution.
// // For nonparametric bicubic projection camera:
// float2 undistorted_nxy = make_float2(position.x / position.z,
// position.y / position.z);
//
// float fc = (undistorted_nxy.x - min_nx) * ((resolution_x - 1) / (max_nx - min_nx));
// float fr = (undistorted_nxy.y - min_ny) * ((resolution_y - 1) / (max_ny - min_ny));
// const int row = ::floor(fr);
// const int col = ::floor(fc);
// float r_frac = fr - row;
// float c_frac = fc - col;
//
// int c[4];
// int r[4];
// for (int i = 0; i < 4; ++ i) {
// c[i] = min(max(0, col - 1 + i), resolution_x - 1);
// r[i] = min(max(0, row - 1 + i), resolution_y - 1);
// }
//
// float2 f[4];
// for (int wrow = 0; wrow < 4; ++ wrow) {
// float2 p0 = grid(r[wrow], c[0]);
// float2 p1 = grid(r[wrow], c[1]);
// float2 p2 = grid(r[wrow], c[2]);
// float2 p3 = grid(r[wrow], c[3]);
//
// f[wrow] = CubicHermiteSpline(p0, p1, p2, p3, c_frac);
// }
//
// return CubicHermiteSpline(f[0], f[1], f[2], f[3], r_frac);
}
int resolution_x;
int resolution_y;
float min_nx;
float min_ny;
float max_nx;
float max_ny;
CUDABuffer_<float2> grid2;
CUDABuffer_<float3> grid3;
float omega;
float two_tan_omega_half;
float fx, fy, cx, cy;
float k1, k2, k3, k4, p1, p2;
float sx1, sy1;
int type; // from Camera::Type enum
int width;
int height;
};
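// The Gauss-Newton update above solves the 2x2 normal equations H * x = b through an
// in-place Cholesky factorization H = L * L^T followed by forward and back substitution.
// The function below is a hedged, standalone host-side sketch of that same solve; it is
// not part of the original projection code, and the name SolveCholesky2x2 is made up for
// this illustration.
inline void SolveCholesky2x2(float H00, float H10, float H11,
                             float b0, float b1,
                             float* x0, float* x1) {
  // Factorization: L = [[l00, 0], [l10, l11]] with H = L * L^T.
  float l00 = sqrtf(H00);
  float l10 = H10 / l00;
  float l11 = sqrtf(H11 - l10 * l10);
  // Forward substitution: L * y = b.
  float y0 = b0 / l00;
  float y1 = (b1 - l10 * y0) / l11;
  // Back substitution: L^T * x = y.
  *x1 = y1 / l11;
  *x0 = (y0 - l10 * (*x1)) / l00;
}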
}
|
the_stack
|
#include <map>
#include <stdint.h>
#include "miner.h"
#include "salsa_kernel.h"
#include "cuda_helper.h"
typedef uint32_t sph_u32;
#define SPH_ROTL32 ROTL32
#define SPH_ROTR32 ROTR32
__constant__ uint64_t ptarget64[4];
__constant__ uint32_t pdata[20];
// define some error checking macros
#define DELIMITER '/'
#define __FILENAME__ ( strrchr(__FILE__, DELIMITER) != NULL ? strrchr(__FILE__, DELIMITER)+1 : __FILE__ )
#undef checkCudaErrors
#define checkCudaErrors(x) \
{ \
cudaGetLastError(); \
x; \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess && !abort_flag) \
applog(LOG_ERR, "GPU #%d: cudaError %d (%s) (%s line %d)\n", device_map[thr_id], err, cudaGetErrorString(err), __FILENAME__, __LINE__); \
}
// from salsa_kernel.cu
extern std::map<int, uint32_t *> context_idata[2];
extern std::map<int, uint32_t *> context_odata[2];
extern std::map<int, cudaStream_t> context_streams[2];
extern std::map<int, uint32_t *> context_hash[2];
#ifdef _MSC_VER
#pragma warning (disable: 4146)
#endif
/**
* Encode a 32-bit value into the provided buffer (big endian convention).
*
* @param dst the destination buffer
* @param val the 32-bit value to encode
*/
static __device__ void
cuda_sph_enc32be(void *dst, sph_u32 val)
{
*(sph_u32 *)dst = cuda_swab32(val);
}
#define Z00 0
#define Z01 1
#define Z02 2
#define Z03 3
#define Z04 4
#define Z05 5
#define Z06 6
#define Z07 7
#define Z08 8
#define Z09 9
#define Z0A A
#define Z0B B
#define Z0C C
#define Z0D D
#define Z0E E
#define Z0F F
#define Z10 E
#define Z11 A
#define Z12 4
#define Z13 8
#define Z14 9
#define Z15 F
#define Z16 D
#define Z17 6
#define Z18 1
#define Z19 C
#define Z1A 0
#define Z1B 2
#define Z1C B
#define Z1D 7
#define Z1E 5
#define Z1F 3
#define Z20 B
#define Z21 8
#define Z22 C
#define Z23 0
#define Z24 5
#define Z25 2
#define Z26 F
#define Z27 D
#define Z28 A
#define Z29 E
#define Z2A 3
#define Z2B 6
#define Z2C 7
#define Z2D 1
#define Z2E 9
#define Z2F 4
#define Z30 7
#define Z31 9
#define Z32 3
#define Z33 1
#define Z34 D
#define Z35 C
#define Z36 B
#define Z37 E
#define Z38 2
#define Z39 6
#define Z3A 5
#define Z3B A
#define Z3C 4
#define Z3D 0
#define Z3E F
#define Z3F 8
#define Z40 9
#define Z41 0
#define Z42 5
#define Z43 7
#define Z44 2
#define Z45 4
#define Z46 A
#define Z47 F
#define Z48 E
#define Z49 1
#define Z4A B
#define Z4B C
#define Z4C 6
#define Z4D 8
#define Z4E 3
#define Z4F D
#define Z50 2
#define Z51 C
#define Z52 6
#define Z53 A
#define Z54 0
#define Z55 B
#define Z56 8
#define Z57 3
#define Z58 4
#define Z59 D
#define Z5A 7
#define Z5B 5
#define Z5C F
#define Z5D E
#define Z5E 1
#define Z5F 9
#define Z60 C
#define Z61 5
#define Z62 1
#define Z63 F
#define Z64 E
#define Z65 D
#define Z66 4
#define Z67 A
#define Z68 0
#define Z69 7
#define Z6A 6
#define Z6B 3
#define Z6C 9
#define Z6D 2
#define Z6E 8
#define Z6F B
#define Z70 D
#define Z71 B
#define Z72 7
#define Z73 E
#define Z74 C
#define Z75 1
#define Z76 3
#define Z77 9
#define Z78 5
#define Z79 0
#define Z7A F
#define Z7B 4
#define Z7C 8
#define Z7D 6
#define Z7E 2
#define Z7F A
#define Z80 6
#define Z81 F
#define Z82 E
#define Z83 9
#define Z84 B
#define Z85 3
#define Z86 0
#define Z87 8
#define Z88 C
#define Z89 2
#define Z8A D
#define Z8B 7
#define Z8C 1
#define Z8D 4
#define Z8E A
#define Z8F 5
#define Z90 A
#define Z91 2
#define Z92 8
#define Z93 4
#define Z94 7
#define Z95 6
#define Z96 1
#define Z97 5
#define Z98 F
#define Z99 B
#define Z9A 9
#define Z9B E
#define Z9C 3
#define Z9D C
#define Z9E D
#define Z9F 0
#define Mx(r, i) Mx_(Z ## r ## i)
#define Mx_(n) Mx__(n)
#define Mx__(n) M ## n
#define CSx(r, i) CSx_(Z ## r ## i)
#define CSx_(n) CSx__(n)
#define CSx__(n) CS ## n
#define CS0 SPH_C32(0x243F6A88)
#define CS1 SPH_C32(0x85A308D3)
#define CS2 SPH_C32(0x13198A2E)
#define CS3 SPH_C32(0x03707344)
#define CS4 SPH_C32(0xA4093822)
#define CS5 SPH_C32(0x299F31D0)
#define CS6 SPH_C32(0x082EFA98)
#define CS7 SPH_C32(0xEC4E6C89)
#define CS8 SPH_C32(0x452821E6)
#define CS9 SPH_C32(0x38D01377)
#define CSA SPH_C32(0xBE5466CF)
#define CSB SPH_C32(0x34E90C6C)
#define CSC SPH_C32(0xC0AC29B7)
#define CSD SPH_C32(0xC97C50DD)
#define CSE SPH_C32(0x3F84D5B5)
#define CSF SPH_C32(0xB5470917)
#define GS(m0, m1, c0, c1, a, b, c, d) do { \
a = SPH_T32(a + b + (m0 ^ c1)); \
d = SPH_ROTR32(d ^ a, 16); \
c = SPH_T32(c + d); \
b = SPH_ROTR32(b ^ c, 12); \
a = SPH_T32(a + b + (m1 ^ c0)); \
d = SPH_ROTR32(d ^ a, 8); \
c = SPH_T32(c + d); \
b = SPH_ROTR32(b ^ c, 7); \
} while (0)
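// For reference, the GS macro above is the Blake-256 "G" mixing step on the state words
// (a, b, c, d) with message words m0/m1 and round constants c0/c1. The plain-C sketch
// below is a hedged illustration only: it is not used by the kernel, the helper names are
// made up for this example, and the 32-bit rotation is written out explicitly so the
// sketch does not depend on the ROTR32 macro.
static inline uint32_t blake256_rotr32_ref(uint32_t x, int n) {
	return (x >> n) | (x << (32 - n));
}
static inline void blake256_g_ref(uint32_t m0, uint32_t m1, uint32_t c0, uint32_t c1,
                                  uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b + (m0 ^ c1);  *d = blake256_rotr32_ref(*d ^ *a, 16);
	*c += *d;              *b = blake256_rotr32_ref(*b ^ *c, 12);
	*a += *b + (m1 ^ c0);  *d = blake256_rotr32_ref(*d ^ *a, 8);
	*c += *d;              *b = blake256_rotr32_ref(*b ^ *c, 7);
}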
#define ROUND_S(r) do { \
GS(Mx(r, 0), Mx(r, 1), CSx(r, 0), CSx(r, 1), V0, V4, V8, VC); \
GS(Mx(r, 2), Mx(r, 3), CSx(r, 2), CSx(r, 3), V1, V5, V9, VD); \
GS(Mx(r, 4), Mx(r, 5), CSx(r, 4), CSx(r, 5), V2, V6, VA, VE); \
GS(Mx(r, 6), Mx(r, 7), CSx(r, 6), CSx(r, 7), V3, V7, VB, VF); \
GS(Mx(r, 8), Mx(r, 9), CSx(r, 8), CSx(r, 9), V0, V5, VA, VF); \
GS(Mx(r, A), Mx(r, B), CSx(r, A), CSx(r, B), V1, V6, VB, VC); \
GS(Mx(r, C), Mx(r, D), CSx(r, C), CSx(r, D), V2, V7, V8, VD); \
GS(Mx(r, E), Mx(r, F), CSx(r, E), CSx(r, F), V3, V4, V9, VE); \
} while (0)
#define COMPRESS32 do { \
sph_u32 M0, M1, M2, M3, M4, M5, M6, M7; \
sph_u32 M8, M9, MA, MB, MC, MD, ME, MF; \
sph_u32 V0, V1, V2, V3, V4, V5, V6, V7; \
sph_u32 V8, V9, VA, VB, VC, VD, VE, VF; \
V0 = H0; \
V1 = H1; \
V2 = H2; \
V3 = H3; \
V4 = H4; \
V5 = H5; \
V6 = H6; \
V7 = H7; \
V8 = S0 ^ CS0; \
V9 = S1 ^ CS1; \
VA = S2 ^ CS2; \
VB = S3 ^ CS3; \
VC = T0 ^ CS4; \
VD = T0 ^ CS5; \
VE = T1 ^ CS6; \
VF = T1 ^ CS7; \
M0 = input[0]; \
M1 = input[1]; \
M2 = input[2]; \
M3 = input[3]; \
M4 = input[4]; \
M5 = input[5]; \
M6 = input[6]; \
M7 = input[7]; \
M8 = input[8]; \
M9 = input[9]; \
MA = input[10]; \
MB = input[11]; \
MC = input[12]; \
MD = input[13]; \
ME = input[14]; \
MF = input[15]; \
ROUND_S(0); \
ROUND_S(1); \
ROUND_S(2); \
ROUND_S(3); \
ROUND_S(4); \
ROUND_S(5); \
ROUND_S(6); \
ROUND_S(7); \
H0 ^= S0 ^ V0 ^ V8; \
H1 ^= S1 ^ V1 ^ V9; \
H2 ^= S2 ^ V2 ^ VA; \
H3 ^= S3 ^ V3 ^ VB; \
H4 ^= S0 ^ V4 ^ VC; \
H5 ^= S1 ^ V5 ^ VD; \
H6 ^= S2 ^ V6 ^ VE; \
H7 ^= S3 ^ V7 ^ VF; \
} while (0)
__global__
void cuda_blake256_hash( uint64_t *g_out, uint32_t nonce, uint32_t *g_good, bool validate )
{
uint32_t input[16];
uint64_t output[4];
#pragma unroll
for (int i=0; i < 16; ++i) input[i] = pdata[i];
sph_u32 H0 = 0x6A09E667;
sph_u32 H1 = 0xBB67AE85;
sph_u32 H2 = 0x3C6EF372;
sph_u32 H3 = 0xA54FF53A;
sph_u32 H4 = 0x510E527F;
sph_u32 H5 = 0x9B05688C;
sph_u32 H6 = 0x1F83D9AB;
sph_u32 H7 = 0x5BE0CD19;
sph_u32 S0 = 0;
sph_u32 S1 = 0;
sph_u32 S2 = 0;
sph_u32 S3 = 0;
sph_u32 T0 = 0;
sph_u32 T1 = 0;
T0 = SPH_T32(T0 + 512);
COMPRESS32;
#pragma unroll
for (int i=0; i < 3; ++i) input[i] = pdata[16+i];
input[3] = nonce + ((blockIdx.x * blockDim.x) + threadIdx.x);
input[4] = 0x80000000;
#pragma unroll 8
for (int i=5; i < 13; ++i) input[i] = 0;
input[13] = 0x00000001;
input[14] = T1;
input[15] = T0 + 128;
T0 = SPH_T32(T0 + 128);
COMPRESS32;
cuda_sph_enc32be((unsigned char*)output + 4*6, H6);
cuda_sph_enc32be((unsigned char*)output + 4*7, H7);
if (validate || output[3] <= ptarget64[3])
{
// this data is only needed when we actually need to save the hashes
cuda_sph_enc32be((unsigned char*)output + 4*0, H0);
cuda_sph_enc32be((unsigned char*)output + 4*1, H1);
cuda_sph_enc32be((unsigned char*)output + 4*2, H2);
cuda_sph_enc32be((unsigned char*)output + 4*3, H3);
cuda_sph_enc32be((unsigned char*)output + 4*4, H4);
cuda_sph_enc32be((unsigned char*)output + 4*5, H5);
}
if (validate)
{
g_out += 4 * ((blockIdx.x * blockDim.x) + threadIdx.x);
#pragma unroll
for (int i=0; i < 4; ++i) g_out[i] = output[i];
}
if (output[3] <= ptarget64[3]) {
uint64_t *g_good64 = (uint64_t*)g_good;
if (output[3] < g_good64[3]) {
g_good64[3] = output[3];
g_good64[2] = output[2];
g_good64[1] = output[1];
g_good64[0] = output[0];
g_good[8] = nonce + ((blockIdx.x * blockDim.x) + threadIdx.x);
}
}
}
static std::map<int, uint32_t *> context_good[2];
static bool init[MAX_GPUS] = { 0 };
bool default_prepare_blake256(int thr_id, const uint32_t host_pdata[20], const uint32_t host_ptarget[8])
{
if (!init[thr_id])
{
        // allocate device memory for the "good hash" result buffers (9 uint32_t each)
uint32_t *tmp;
checkCudaErrors(cudaMalloc((void **) &tmp, 9*sizeof(uint32_t))); context_good[0][thr_id] = tmp;
checkCudaErrors(cudaMalloc((void **) &tmp, 9*sizeof(uint32_t))); context_good[1][thr_id] = tmp;
init[thr_id] = true;
}
checkCudaErrors(cudaMemcpyToSymbol(pdata, host_pdata, 80, 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(ptarget64, host_ptarget, 32, 0, cudaMemcpyHostToDevice));
return context_good[0][thr_id] && context_good[1][thr_id];
}
void default_do_blake256(dim3 grid, dim3 threads, int thr_id, int stream, uint32_t *hash, uint32_t nonce, int throughput, bool do_d2h)
{
checkCudaErrors(cudaMemsetAsync(context_good[stream][thr_id], 0xff, 9 * sizeof(uint32_t), context_streams[stream][thr_id]));
cuda_blake256_hash<<<grid, threads, 0, context_streams[stream][thr_id]>>>((uint64_t*)context_hash[stream][thr_id], nonce, context_good[stream][thr_id], do_d2h);
// copy hashes from device memory to host (ALL hashes, lots of data...)
if (do_d2h && hash != NULL) {
size_t mem_size = throughput * sizeof(uint32_t) * 8;
checkCudaErrors(cudaMemcpyAsync(hash, context_hash[stream][thr_id], mem_size,
cudaMemcpyDeviceToHost, context_streams[stream][thr_id]));
}
else if (hash != NULL) {
// asynchronous copy of winning nonce (just 4 bytes...)
checkCudaErrors(cudaMemcpyAsync(hash, context_good[stream][thr_id]+8, sizeof(uint32_t),
cudaMemcpyDeviceToHost, context_streams[stream][thr_id]));
}
}
void default_free_blake256(int thr_id)
{
if (init[thr_id]) {
cudaFree(context_good[0][thr_id]);
cudaFree(context_good[1][thr_id]);
init[thr_id] = false;
}
}
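// Hedged usage sketch (not part of the original file): the intended call order of the three
// entry points above for one mining thread. All values here (thr_id, nonce, throughput, the
// launch configuration, and the all_hashes buffer of throughput * 8 uint32_t) are
// placeholders, and the CUDA stream/hash contexts for (stream, thr_id) are assumed to have
// been initialized by salsa_kernel.cu.
static void example_blake256_usage(int thr_id, uint32_t nonce, int throughput,
                                   const uint32_t host_pdata[20], const uint32_t host_ptarget[8],
                                   uint32_t *all_hashes)
{
	// Copy the 80-byte block header and the 32-byte target into constant memory and
	// allocate the per-thread "good hash" buffers.
	default_prepare_blake256(thr_id, host_pdata, host_ptarget);
	// Placeholder launch configuration covering `throughput` nonces.
	dim3 grid(throughput / 256), threads(256);
	default_do_blake256(grid, threads, thr_id, /*stream=*/0, all_hashes, nonce, throughput,
	                    /*do_d2h=*/true);
	default_free_blake256(thr_id);
}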
|
the_stack
|
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
#include <chrono>
#include <type_traits>
using namespace tensorflow;
const int kThreadsPerBlock = 256;
#define min(a, b) (((a) > (b)) ? (b) : (a))
#define max(a, b) (((a) > (b)) ? (a) : (b))
__host__ __device__ __forceinline__ int divUp(int total, int grain)
{
return (total + grain - 1) / grain;
}
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
if (val==0.0)
return __longlong_as_double(old);
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
void cudaErrorCheck(int line) {
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error at line %d: %s\n", line, cudaGetErrorString(error));
exit(-1);
}
}
class timer
{
// alias our types for simplicity
using clock = typename std::conditional< std::chrono::high_resolution_clock::is_steady,
std::chrono::high_resolution_clock,
std::chrono::steady_clock >::type ;
using time_point_type = std::chrono::time_point < clock, std::chrono::milliseconds > ;
public:
// default constructor that stores the start time
timer()
{
start = std::chrono::time_point_cast<std::chrono::milliseconds>(clock::now());
}
// gets the time elapsed from construction.
float getTimePassed()
{
// get the new time
auto end = clock::now();
// return the difference of the times
return (end - start).count() / 1e9f;
}
private:
time_point_type start;
};
struct GpuAlloc {
GpuAlloc(OpKernelContext *context) {
this->context = context;
}
void alloc(void**data, int bytes) {
        // We use Tensorflow's allocate_temp to allocate memory instead of using cudaMalloc/cudaFree directly.
        // Using cudaMalloc/cudaFree is fine for the single-GPU case, but can cause race conditions in the
        // multi-GPU case, resulting in long delays in memory allocation and freeing.
        // (A usage sketch combining GpuAlloc and Array follows after the Array struct below.)
int num_elems = std::ceil(bytes / 8.0);
// Have to keep the tensors until end of op to avoid memory crash.
tmp_tensors.push_back(Tensor());
Tensor &tmp = tmp_tensors.back();
TensorShape shape({num_elems});
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<double>::value, shape, &tmp));
double *buf = &(tmp.flat<double>()(0));
*data = (void*)buf;
}
void free(void **data) {
*data = NULL;
}
void zero(void *data, int bytes) {
cudaMemset(data, 0, bytes);
}
OpKernelContext *context;
std::vector<Tensor> tmp_tensors;
};
/*
struct GpuAlloc {
GpuAlloc(OpKernelContext *context) {
}
void alloc(void **data, int bytes) {
cudaMalloc(data, bytes);
}
void free(void **data) {
cudaFree(*data);
*data = NULL;
}
void zero(void *data, int bytes) {
cudaMemset(data, 0, bytes);
}
};*/
/**
 * This array wraps data provided by an existing device pointer, or data from a device
 * memory allocation made through GpuAlloc.
 *
 * The memory is allocated from host code; accessing the array elements must be done on
 * the device.
 */
template <typename T>
struct Array {
T *data;
int capacity;
int size;
Array() {
data = NULL;
capacity = 0;
size = 0;
}
/**
* Wrap an input array
*/
Array(T *data, int n) {
this->data = data;
capacity = n;
size = n;
}
void alloc(GpuAlloc *allocator, int new_capacity) {
if (new_capacity > capacity) {
this->free(allocator);
allocator->alloc((void**)&data, new_capacity * sizeof(T));
this->capacity = new_capacity;
}
this->size = 0;
}
void resize(GpuAlloc *allocator, int size) {
if (size > capacity) {
this->free(allocator);
this->alloc(allocator, size);
}
this->size = size;
}
void free(GpuAlloc *allocator) {
if (! data || capacity == 0) return;
allocator->free((void**)&data);
this->data = NULL;
this->capacity = 0;
this->size = 0;
}
__device__ void zero() {
for (int i = 0; i < size; ++i) data[i] = 0;
}
__host__ void zero(GpuAlloc *allocator) {
allocator->zero((void*)data, size * sizeof(T));
}
__device__ T& operator[](int i) {
return data[i];
}
__device__ T operator[](int i) const {
return data[i];
}
__device__ void append(T value) {
data[size] = value;
size++;
}
__device__ void clear() {
size = 0;
}
};
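// Hedged usage sketch (not part of the original file): how GpuAlloc and Array<T> are meant
// to be combined inside an op's Compute(). The `context` pointer is assumed to be the
// OpKernelContext passed to Compute(), and `n` is a placeholder element count.
template <typename T>
static void example_array_usage(OpKernelContext* context, int n) {
    GpuAlloc allocator(context);   // temp tensors stay alive until the op finishes
    Array<T> buffer;
    buffer.resize(&allocator, n);  // allocate_temp-backed device memory
    buffer.zero(&allocator);       // cudaMemset over size * sizeof(T) bytes
    buffer.free(&allocator);       // drops the pointer; the temp tensor is released with the op
}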
struct Query {
__device__ virtual void operator()(int ii, int f, int fsize) = 0;
};
template <typename T>
struct Grid {
Array<T> points;
Array<int> neighbor_count; // for each point, for each filter, for each cell of the filter, store how many points in that cell.
T voxel_size;
int filter_x, filter_y, filter_z, filter_count;
int stride_x, stride_y, stride_z;
int filter_full_x, filter_full_y, filter_full_z;
Grid() {}
Grid(Array<T> points, T voxel_size, int filter_x, int filter_y, int filter_z, int stride_x, int stride_y, int stride_z) {
this->points = points;
this->voxel_size = voxel_size;
this->filter_x = filter_x;
this->filter_y = filter_y;
this->filter_z = filter_z;
this->filter_count = filter_x * filter_y * filter_z;
this->stride_x = stride_x;
this->stride_y = stride_y;
this->stride_z = stride_z;
filter_full_x = (filter_x - 1) * stride_x + 1;
filter_full_y = (filter_y - 1) * stride_y + 1;
filter_full_z = (filter_z - 1) * stride_z + 1;
}
void alloc(GpuAlloc *allocator) {
neighbor_count.resize(allocator, points.size * filter_count);
neighbor_count.zero(allocator);
}
__device__ void neighbor_brute_force(int i, T x, T y, T z, Query &query)
{
// Center the filter at the current point
T xmin = x - filter_full_x * 0.5 * voxel_size;
T xmax = x + filter_full_x * 0.5 * voxel_size;
T ymin = y - filter_full_y * 0.5 * voxel_size;
T ymax = y + filter_full_y * 0.5 * voxel_size;
T zmin = z - filter_full_z * 0.5 * voxel_size;
T zmax = z + filter_full_z * 0.5 * voxel_size;
for (int j = 0; j < points.size; ++j) {
T vx = points[3 * j + 0];
T vy = points[3 * j + 1];
T vz = points[3 * j + 2];
if (vx < xmin || vx > xmax || vy < ymin || vy > ymax || vz < zmin || vz > zmax) continue;
            // Determine which filter cell the neighbor falls into
            // (see the index sketch after this struct).
int fx = min(filter_full_x - 1, (int)((vx - xmin) / voxel_size));
int fy = min(filter_full_y - 1, (int)((vy - ymin) / voxel_size));
int fz = min(filter_full_z - 1, (int)((vz - zmin) / voxel_size));
// If the cell is a hole, skip
if (fx % stride_x != 0 || fy % stride_y != 0 || fz % stride_z != 0) continue;
fx /= stride_x;
fy /= stride_y;
fz /= stride_z;
int f = (fz * filter_y + fy) * filter_x + fx;
// Good point
query(j, f, neighbor_count[i * filter_count + f]);
}
}
__device__ void build_neighbor_count(int i) {
T x = points[3 * i + 0];
T y = points[3 * i + 1];
T z = points[3 * i + 2];
// Center the filter at the current point
T xmin = x - filter_full_x * 0.5 * voxel_size;
T xmax = x + filter_full_x * 0.5 * voxel_size;
T ymin = y - filter_full_y * 0.5 * voxel_size;
T ymax = y + filter_full_y * 0.5 * voxel_size;
T zmin = z - filter_full_z * 0.5 * voxel_size;
T zmax = z + filter_full_z * 0.5 * voxel_size;
for (int j = 0; j < points.size; ++j) {
T vx = points[3 * j + 0];
T vy = points[3 * j + 1];
T vz = points[3 * j + 2];
if (vx < xmin || vx > xmax || vy < ymin || vy > ymax || vz < zmin || vz > zmax) continue;
// Determine which cell
int fx = min(filter_full_x - 1, (int)((vx - xmin) / voxel_size));
int fy = min(filter_full_y - 1, (int)((vy - ymin) / voxel_size));
int fz = min(filter_full_z - 1, (int)((vz - zmin) / voxel_size));
// If the cell is a hole, skip
if (fx % stride_x != 0 || fy % stride_y != 0 || fz % stride_z != 0) continue;
fx /= stride_x;
fy /= stride_y;
fz /= stride_z;
int f = (fz * filter_y + fy) * filter_x + fx;
neighbor_count[i * filter_count + f]++;
}
}
void free(GpuAlloc *allocator) {
neighbor_count.free(allocator);
}
};
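// Hedged illustration (not part of the original file) of the cell-index arithmetic used in
// neighbor_brute_force() and build_neighbor_count() above: a neighbor falling into cell
// (fx, fy, fz) of the dilated filter is kept only if it lands on a stride point, and is then
// mapped to the flat filter index f. All names below are local to this sketch.
inline int example_filter_cell_index(float vx, float vy, float vz,
                                     float xmin, float ymin, float zmin, float voxel_size,
                                     int filter_x, int filter_y,
                                     int stride_x, int stride_y, int stride_z,
                                     int filter_full_x, int filter_full_y, int filter_full_z) {
    int fx = min(filter_full_x - 1, (int)((vx - xmin) / voxel_size));
    int fy = min(filter_full_y - 1, (int)((vy - ymin) / voxel_size));
    int fz = min(filter_full_z - 1, (int)((vz - zmin) / voxel_size));
    if (fx % stride_x != 0 || fy % stride_y != 0 || fz % stride_z != 0) return -1;  // hole: skip
    fx /= stride_x;
    fy /= stride_y;
    fz /= stride_z;
    return (fz * filter_y + fy) * filter_x + fx;  // flat filter index
}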
template <typename T>
__global__ void kernelBuildNeighborCount(int batch_size, int num_points, Array< Grid<T> > grids) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int b = idx / num_points;
int i = idx % num_points;
if (b >= batch_size) return;
grids[b].build_neighbor_count(i);
}
template <typename T>
struct ForwardQuery : public Query {
__device__ ForwardQuery(int i, const T *points, const T *input, const T *filter, T *output,
int batch_size, int num_points, int filter_x, int filter_y, int filter_z,
int stride_x, int stride_y, int stride_z,
int filter_c_in, int filter_c_out, T voxel_size)
: i(i), points(points),
input(input),
filter(filter),
output(output),
batch_size(batch_size), num_points(num_points), filter_x(filter_x), filter_y(filter_y), filter_z(filter_z),
stride_x(stride_x), stride_y(stride_y), stride_z(stride_z),
filter_c_in(filter_c_in), filter_c_out(filter_c_out), voxel_size(voxel_size)
{
}
__device__ void operator()(int ii, int f, int fsize) {
T inv_fsize = 1.0 / fsize;
#pragma unroll
for (int c = 0; c < filter_c_out; ++c) {
#pragma unroll
for (int k = 0; k < filter_c_in; ++k) {
// Get filter weight
T w = filter[(f * filter_c_in + k) * filter_c_out + c];
output[i * filter_c_out + c] += w * input[ii * filter_c_in + k] * inv_fsize;
}
}
}
int i;
const T *points; const T *input; const T *filter; T *output;
int batch_size; int num_points; int filter_x; int filter_y; int filter_z;
int stride_x; int stride_y; int stride_z;
int filter_c_in; int filter_c_out; float voxel_size;
};
template <typename T>
__global__ void kernelForward(const T *points_flat, const T *input_flat, const T *filter, T *output_flat,
int batch_size, int num_points, int filter_x, int filter_y, int filter_z,
int stride_x, int stride_y, int stride_z,
int filter_c_in, int filter_c_out, T voxel_size,
Array< Grid<T> > grids ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int b = idx / num_points;
int i = idx % num_points;
if (b >= batch_size) return;
const T *points = points_flat + b * num_points * 3; // XYZ as input
const T *input = input_flat + b * num_points * filter_c_in;
T *output = output_flat + b * num_points * filter_c_out;
Grid<T> &grid = grids[b];
T x = points[3 * i + 0];
T y = points[3 * i + 1];
T z = points[3 * i + 2];
ForwardQuery<T> query(i, points, input, filter, output, batch_size, num_points, filter_x, filter_y, filter_z,
stride_x, stride_y, stride_z,
filter_c_in, filter_c_out, voxel_size);
grid.neighbor_brute_force(i, x, y, z, query);
}
template <typename T>
struct GradientQuery : Query {
int j; T x, y, z;
const T *grad_from_next_tensor;
const T *points; const T *input; const T *filter; T *grad_input; T *grad_filter_thread_arr;
int batch_size; int num_points; int filter_x; int filter_y; int filter_z; int filter_count;
int stride_x; int stride_y; int stride_z;
int filter_full_x; int filter_full_y; int filter_full_z;
int filter_c_in; int filter_c_out; float voxel_size;
Array<int> &neighbor_count;
__device__ GradientQuery(int j, T x, T y, T z, /* the point where we start the neighbor query */
const T *grad_from_next_tensor,
const T *points, const T *input, const T *filter, T *grad_input, T *grad_filter_thread_arr,
int batch_size, int num_points, int filter_x, int filter_y, int filter_z,
int stride_x, int stride_y, int stride_z,
int filter_full_x, int filter_full_y, int filter_full_z,
int filter_c_in, int filter_c_out, float voxel_size, Array<int> &neighbor_count)
: j(j), x(x), y(y), z(z),
grad_from_next_tensor(grad_from_next_tensor),
points(points),
input(input),
filter(filter),
grad_input(grad_input), grad_filter_thread_arr(grad_filter_thread_arr),
batch_size(batch_size), num_points(num_points), filter_x(filter_x), filter_y(filter_y), filter_z(filter_z),
stride_x(stride_x), stride_y(stride_y), stride_z(stride_z),
filter_full_x(filter_full_x), filter_full_y(filter_full_y), filter_full_z(filter_full_z),
filter_c_in(filter_c_in), filter_c_out(filter_c_out), voxel_size(voxel_size),
neighbor_count(neighbor_count)
{
filter_count = filter_x * filter_y * filter_z;
}
__device__ void operator()(int ii, int f_ii, int fsize_ii) {
        // Take ii as the center
T kx = points[3 * ii + 0];
T ky = points[3 * ii + 1];
T kz = points[3 * ii + 2];
T xmin = kx - filter_full_x * 0.5 * voxel_size;
T ymin = ky - filter_full_y * 0.5 * voxel_size;
T zmin = kz - filter_full_z * 0.5 * voxel_size;
// Check which cell the point pj is in w.r.t the point pi
int fx = min(filter_full_x - 1, (int)((x - xmin) / voxel_size));
int fy = min(filter_full_y - 1, (int)((y - ymin) / voxel_size));
int fz = min(filter_full_z - 1, (int)((z - zmin) / voxel_size));
// If the cell is a hole, skip
if (fx % stride_x != 0 || fy % stride_y != 0 || fz % stride_z != 0) return;
fx /= stride_x;
fy /= stride_y;
fz /= stride_z;
int filter_index = (fz * filter_y + fy) * filter_x + fx;
int count = neighbor_count[ii * filter_count + filter_index];
if (count == 0) return; // FIXME: non-symmetric neighbor issue
// For all types of filters
#pragma unroll
for (int c = 0; c < filter_c_out; ++c) {
int out_index = ii * filter_c_out + c;
T dL_dxi = grad_from_next_tensor[out_index];
T dL_dxi_div_count = dL_dxi / (T)count;
#pragma unroll
for (int k = 0; k < filter_c_in; ++k) {
int weight_index = (filter_index * filter_c_in + k) * filter_c_out + c;
int in_index = j * filter_c_in + k;
// Update the gradient of an input xi
T w_as = filter[weight_index];
grad_input[in_index] += dL_dxi_div_count * w_as;
// Update the gradient of a filter weight
T dxi_dw = input[in_index];
atomicAdd(&grad_filter_thread_arr[weight_index], dL_dxi_div_count * dxi_dw);
}
}
}
};
template <typename T>
__global__ void kernelGradient(const T *grad_from_next_tensor_flat,
const T *points_flat, const T *input_flat, const T *filter,
T *grad_input_flat, T *grad_filter_thread_arr,
int batch_size, int num_points, int filter_x, int filter_y, int filter_z,
int stride_x, int stride_y, int stride_z,
int filter_c_in, int filter_c_out, T voxel_size,
Array< Grid<T> > grids) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int b = idx / num_points;
int j = idx % num_points;
if (b >= batch_size) return;
const T *grad_from_next_tensor = grad_from_next_tensor_flat + b * num_points * filter_c_out;
const T *points = points_flat + b * num_points * 3; // XYZ as input
const T *input = input_flat + b * num_points * filter_c_in;
T *grad_input = grad_input_flat + b * num_points * filter_c_in;
Grid<T> &grid = grids[b];
T x = points[3 * j + 0];
T y = points[3 * j + 1];
T z = points[3 * j + 2];
GradientQuery<T> query(j, x, y, z, grad_from_next_tensor,
points, input, filter, grad_input, grad_filter_thread_arr, batch_size, num_points, filter_x, filter_y, filter_z,
stride_x, stride_y, stride_z,
grid.filter_full_x, grid.filter_full_y, grid.filter_full_z,
filter_c_in, filter_c_out, voxel_size, grid.neighbor_count);
grid.neighbor_brute_force(j, x, y, z, query);
}
template <typename T>
class Conv3pOp : public OpKernel {
public:
explicit Conv3pOp(OpKernelConstruction* context) : OpKernel(context) {
}
void Compute(OpKernelContext* context) override {
// Point tensor is of the following dimensions:
// [ batch, num_points, 3 ]
const Tensor& points_tensor = context->input(0);
OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("Conv3p expects (batch_size, num_points, 3) points shape"));
int batch_size = points_tensor.shape().dim_size(0);
int device;
cudaGetDevice(&device);
int num_points = points_tensor.shape().dim_size(1);
auto points_flat = points_tensor.flat<T>();
// Input tensor is of the following dimensions:
const Tensor& input_tensor = context->input(1);
OP_REQUIRES(context, input_tensor.shape().dim_size(0) == points_tensor.shape().dim_size(0), errors::InvalidArgument("Conv3p expects points and input tensor to have the same batch size"));
OP_REQUIRES(context, input_tensor.shape().dim_size(1) == points_tensor.shape().dim_size(1), errors::InvalidArgument("Conv3p expects points and input tensor to have the same number of points"));
int num_channels_in = input_tensor.shape().dim_size(2);
auto input_flat = input_tensor.flat<T>();
// Input filter is of the following dimensions:
// [ filter_z, filter_y, filter_x, in_channels, out_channels]
const Tensor& filter_tensor = context->input(2);
int filter_z = filter_tensor.shape().dim_size(0);
int filter_y = filter_tensor.shape().dim_size(1);
int filter_x = filter_tensor.shape().dim_size(2);
int filter_c_in = filter_tensor.shape().dim_size(3);
int filter_c_out = filter_tensor.shape().dim_size(4);
OP_REQUIRES(context, filter_c_in == num_channels_in, errors::InvalidArgument("Conv3p expects filter channels to be matched with input channels"));
auto filter_flat = filter_tensor.flat<T>();
const Tensor& stride_tensor = context->input(3);
OP_REQUIRES(context, stride_tensor.shape().dim_size(0) == 3, errors::InvalidArgument("Conv3p expects stride tensor to have size 3."));
const int *stride_flat = &(stride_tensor.flat<int>()(0));
int strides[3];
cudaMemcpy(strides, stride_flat, sizeof(int) * 3, cudaMemcpyDeviceToHost);
int stride_x = strides[0];
int stride_y = strides[1];
int stride_z = strides[2];
const Tensor& voxel_tensor = context->input(4);
OP_REQUIRES(context, voxel_tensor.shape().dim_size(0) == 1, errors::InvalidArgument("Conv3p expects voxel tensor to have dimension 1."));
const T *voxel_flat = &(voxel_tensor.flat<T>()(0));
T voxel_size;
cudaMemcpy(&voxel_size, voxel_flat, sizeof(T), cudaMemcpyDeviceToHost);
// Create output tensor
Tensor* output_tensor = NULL;
OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{batch_size, num_points, filter_c_out},
&output_tensor));
auto output_flat = output_tensor->flat<T>();
cudaMemset(&(output_flat(0)), 0, sizeof(T) * batch_size * num_points * filter_c_out);
GpuAlloc allocator(context);
//timer t1;
std::vector<Grid<T> > cpu_grids(batch_size);
for (int b = 0; b < batch_size; ++b) {
const T *points = &(points_flat(0)) + b * num_points * 3;
cpu_grids[b] = Grid<T>(Array<T>((T*)points, num_points), voxel_size, filter_x, filter_y, filter_z, stride_x, stride_y, stride_z);
cpu_grids[b].alloc(&allocator);
}
Array< Grid<T> > grids;
grids.resize(&allocator, batch_size);
cudaMemcpy(grids.data, cpu_grids.data(), sizeof(Grid<T>) * batch_size, cudaMemcpyHostToDevice);
//std::cout << "device " << device << "t1: " << t1.getTimePassed() << std::endl;
//timer t2;
{
dim3 blocks(divUp(batch_size * num_points, kThreadsPerBlock));
dim3 threads(kThreadsPerBlock);
kernelBuildNeighborCount<<<blocks, threads>>>(batch_size, num_points, grids);
}
//cudaDeviceSynchronize();
//std::cout << "device " << device << "t2: " << t2.getTimePassed() << std::endl;
//timer t3;
{
// Now parallelize over all batches and all points
dim3 blocks(divUp(batch_size * num_points, kThreadsPerBlock));
dim3 threads(kThreadsPerBlock);
kernelForward<<<blocks, threads>>>(&(points_flat(0)), &(input_flat(0)), &(filter_flat(0)), &(output_flat(0)),
batch_size, num_points, filter_x, filter_y, filter_z,
stride_x, stride_y, stride_z,
filter_c_in, filter_c_out, voxel_size,
grids);
}
//cudaDeviceSynchronize();
//std::cout << "device " << device << "t3: " << t3.getTimePassed() << std::endl;
//timer t4;
for (int b = 0; b < batch_size; ++b) {
cpu_grids[b].free(&allocator);
}
grids.free(&allocator);
//cudaDeviceSynchronize();
//std::cout << "device " << device << "t4: " << t4.getTimePassed() << std::endl;
}
};
#define REGISTER_GPU_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("Conv3p").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
Conv3pOp<T>);
TF_CALL_float(REGISTER_GPU_KERNEL);
TF_CALL_double(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
////////////////////////////////////////////////////////////////////////////////
template <typename T>
class Conv3pGradOp : public OpKernel {
public:
explicit Conv3pGradOp(OpKernelConstruction* context) : OpKernel(context) {
}
void Compute(OpKernelContext* context) override {
/**1. Setting things up **/
// get the gradient tensor (from the later tensor)
const Tensor& grad_from_next_tensor = context->input(0);
auto grad_from_next_tensor_flat = grad_from_next_tensor.flat<T>();
// get other inputs
const Tensor& points_tensor = context->input(1);
auto points_flat = points_tensor.flat<T>();
const Tensor& input_tensor = context->input(2);
auto input_flat = input_tensor.flat<T>();
// infos about the inputs
int batch_size = points_tensor.shape().dim_size(0);
int num_points = points_tensor.shape().dim_size(1);
// get the filters tensor (which include weights)
const Tensor& filter_tensor = context->input(3);
auto filter_flat = filter_tensor.flat<T>();
const Tensor& stride_tensor = context->input(4);
OP_REQUIRES(context, stride_tensor.shape().dim_size(0) == 3, errors::InvalidArgument("Conv3p expects stride tensor to have size 3."));
const int *stride_flat = &(stride_tensor.flat<int>()(0));
int strides[3];
cudaMemcpy(strides, stride_flat, sizeof(int) * 3, cudaMemcpyDeviceToHost);
int stride_x = strides[0];
int stride_y = strides[1];
int stride_z = strides[2];
const Tensor& voxel_tensor = context->input(5);
OP_REQUIRES(context, voxel_tensor.shape().dim_size(0) == 1, errors::InvalidArgument("Conv3p expects voxel tensor to have dimension 1."));
const T *voxel_flat = &(voxel_tensor.flat<T>()(0));
T voxel_size;
cudaMemcpy(&voxel_size, voxel_flat, sizeof(T), cudaMemcpyDeviceToHost);
// dimensional infos for the filters tensor
int filter_z = filter_tensor.shape().dim_size(0);
int filter_y = filter_tensor.shape().dim_size(1);
int filter_x = filter_tensor.shape().dim_size(2);
int filter_c_in = filter_tensor.shape().dim_size(3);
int filter_c_out = filter_tensor.shape().dim_size(4);
int num_weights = filter_z * filter_y * filter_x * filter_c_in * filter_c_out;
// Get shape of the grad tensors
TensorShape grad_input_shape = input_tensor.shape();
TensorShape grad_filter_shape = filter_tensor.shape();
    // Create the output tensor for the gradient of the inputs.
    // (number of points) * (number of input channels) = number of gradients.
Tensor* grad_input = NULL;
OP_REQUIRES_OK(context, context->allocate_output(0, grad_input_shape, &grad_input));
auto grad_input_flat = grad_input->flat<T>();
cudaMemset(&(grad_input_flat(0)), 0, sizeof(T) * input_tensor.shape().dim_size(0)*input_tensor.shape().dim_size(1)*input_tensor.shape().dim_size(2));
    // a) First we need to check that the shape of the grad tensor is compatible with the number of points.
OP_REQUIRES(context, grad_from_next_tensor.shape().dim_size(0) == batch_size, errors::InvalidArgument("backprop grad tensor has wrong size for dim 0"));
OP_REQUIRES(context, grad_from_next_tensor.shape().dim_size(1) == num_points, errors::InvalidArgument("backprop grad tensor has wrong size for dim 1"));
OP_REQUIRES(context, grad_from_next_tensor.shape().dim_size(2) == filter_c_out, errors::InvalidArgument("backprop grad tensor has wrong size for dim 2"));
Tensor* grad_filter = NULL;
OP_REQUIRES_OK(context, context->allocate_output(1, grad_filter_shape, &grad_filter));
auto grad_filter_flat = grad_filter->flat<T>();
cudaMemset(&(grad_filter_flat(0)), 0, sizeof(T) * num_weights);
GpuAlloc allocator(context);
std::vector<Grid<T> > cpu_grids(batch_size);
for (int b = 0; b < batch_size; ++b) {
const T *points = &(points_flat(0)) + b * num_points * 3;
cpu_grids[b] = Grid<T>(Array<T>((T*)points, num_points), voxel_size, filter_x, filter_y, filter_z, stride_x, stride_y, stride_z);
cpu_grids[b].alloc(&allocator);
}
Array< Grid<T> > grids;
grids.resize(&allocator, batch_size);
cudaMemcpy(grids.data, cpu_grids.data(), sizeof(Grid<T>) * batch_size, cudaMemcpyHostToDevice);
{
dim3 blocks(divUp(batch_size * num_points, kThreadsPerBlock));
dim3 threads(kThreadsPerBlock);
kernelBuildNeighborCount<<<blocks, threads>>>(batch_size, num_points, grids);
}
// Now parallelize over all batches and all points
{
dim3 blocks(divUp(batch_size * num_points, kThreadsPerBlock));
dim3 threads(kThreadsPerBlock);
kernelGradient<<<blocks, threads>>>(&(grad_from_next_tensor_flat(0)), &(points_flat(0)), &(input_flat(0)), &(filter_flat(0)),
&(grad_input_flat(0)), &(grad_filter_flat(0)), // grad_filter_thread_arr.data,
batch_size, num_points, filter_x, filter_y, filter_z,
stride_x, stride_y, stride_z,
filter_c_in, filter_c_out, voxel_size,
grids);
}
for (int b = 0; b < batch_size; ++b) {
cpu_grids[b].free(&allocator);
}
grids.free(&allocator);
    /** 2. Compute the gradient of the input **/
    // dL/dxj = (sum over all xi that have xj as a neighbor) dL/dxi * w_as,
    // where w_as is the weight associated with xi and xj (w_as = 0 if xj does not contribute to xi).
    // We take advantage of the fact that if xj is a neighbor of xi, then xi is also a neighbor of xj
    // (i.e. the neighborhood is symmetric), so
    // dL/dxj = (sum over all xi that are neighbors of xj) dL/dxi * w_as.
    /** 3. Compute the gradient of the filter **/
    // Reminder: grad_from_next_tensor contains dL/dxi, where xi is an output of the forward pass.
    // Reminder: dL/dw = (sum over i) dL/dxi * dxi/dw, where w is a weight connecting the input to xi.
    // b) We do this by going through all the points and accumulating the gradients
    //    into the grad_filter output tensor created above.
}
};
#define REGISTER_GPU_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("Conv3pGrad").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
Conv3pGradOp<T>);
TF_CALL_float(REGISTER_GPU_KERNEL);
TF_CALL_double(REGISTER_GPU_KERNEL);
#undef REGISTER_GPU_KERNEL
|
the_stack
|
using namespace amgx;
namespace amgx
{
namespace aggregation
{
namespace size8_selector
{
template <int NUM_COLS, typename IndexType>
__global__ __launch_bounds__(256, 4)
void my_findStrongestNeighbourBlockDiaCsr_NoMergeClean(
const IndexType *row_offsets, const IndexType *column_indices,
const float *edge_weights, const IndexType num_block_rows, const IndexType num_nonzero,
IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour,
IndexType *partner_index, float *weight_strongest_neighbour, int deterministic,
const IndexType *unassigned_rows,
const int num_unassigned_row)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
const int lane_id = utils::lane_id();
bool valid_tid = false;
for (; utils::any(valid_tid = tid < num_block_rows); tid += gridDim.x * blockDim.x)
{
int jmin = -NUM_COLS * 2, jmax = -NUM_COLS * 4;
float weight;
int jcol;
float max_weight_unaggregated = 0;
int strongest_unaggregated = -1;
bool is_unassigned = false;
if (valid_tid)
{
is_unassigned = (__load_streaming(partner_index + tid) == -1);
}
if (is_unassigned) // Unaggregated row
{
jmin = __load_all(row_offsets + tid);
jmax = __load_all(row_offsets + tid + 1);
}
if (utils::any(is_unassigned))
{
int jj = jmin - amgx::strided_reduction::warp_loader<int, NUM_COLS>::align_shift(jmin);
for (; utils::any(jj < jmax, utils::activemask()); jj += NUM_COLS)
{
int I[NUM_COLS];
float W[NUM_COLS];
int P[NUM_COLS];
amgx::strided_reduction::warp_loader<int, NUM_COLS>::load(column_indices, jj, num_nonzero, I);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
int j = jj + i;
jcol = I[i];
if (j >= jmin && j < jmax)
{
P[i] = __load_nc(partner_index + jcol); //make this load ASAP
}
}
amgx::strided_reduction::warp_loader<float, NUM_COLS>::load(edge_weights, jj, num_nonzero, W);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
weight = W[i];
jcol = I[i];
int j = jj + i;
if (j >= jmin && j < jmax)
{
if (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated)) // unaggregated
{
if (tid != jcol && P[i] == -1)
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
}
}
}
}
if (strongest_unaggregated == -1) // All neighbours are aggregated
{
// Put in its own aggregate
if (!deterministic && is_unassigned)
{
partner_index[tid] = tid;
}
}
else
{
strongest_neighbour[tid] = strongest_unaggregated;
}
}
}
}
#define ALGORITHM_NOMERGE 0
#define ALGORITHM_STOREWEIGHTS 1
#define ALGORITHM_STOREWEIGHTS_2 2
template <int NUM_COLS, int ALGORITHM, int ASSUME_ALL_UNASSIGNED, int LOAD_ONLY_UNASSIGNED, typename IndexType>
__global__ __launch_bounds__(256, 4)
void my_findStrongestNeighbourBlockDiaCsr_NoMerge(
const IndexType *row_offsets, const IndexType *column_indices,
const float *edge_weights, const IndexType num_block_rows, const IndexType num_nonzero,
IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour,
IndexType *partner_index, float *weight_strongest_neighbour, int deterministic,
const IndexType *n_unassigned_per_block, const IndexType *unassigned_per_block
//const int num_unassigned_row
//const IndexType *unassigned_rows,
)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int bid = blockIdx.x;
const int lane_id = utils::lane_id();
bool valid_tid = false;
for (; utils::any( valid_tid = tid < num_block_rows); tid += gridDim.x * blockDim.x)
{
int jmin = -NUM_COLS * 2, jmax = -NUM_COLS * 4;
float weight;
int jcol;
float max_weight_unaggregated = 0;
int strongest_unaggregated = -1;
float max_weight_aggregated = 0.;
int strongest_aggregated = -1;
int partner = -1;
int partner0, partner1, partner2;
int agg_jcol;
bool is_unassigned = false;
int rowi = -1;
if (LOAD_ONLY_UNASSIGNED)
{
if (valid_tid)
{
rowi = unassigned_per_block[tid];//unassigned_per_block+bid*256+threadIdx.x);
is_unassigned = (__load_nc(partner_index + rowi) == -1);
}
if (is_unassigned)
{
jmin = __load_nc(row_offsets + rowi);
jmax = __load_nc(row_offsets + rowi + 1);
}
}
else
{
rowi = tid;
if (ALGORITHM == ALGORITHM_NOMERGE)
{
if (valid_tid) { is_unassigned = (__load_streaming(partner_index + tid) == -1); }
}
else //ALGORITHM_STOREWEIGHTS or ALGORITHM_STOREWEIGHTS_2
{
if (valid_tid) { is_unassigned = (__load_streaming(aggregated + tid) == -1); }
}
if (is_unassigned) // mind the else above
{
jmin = __load_global(row_offsets + rowi);
jmax = __load_lastuse(row_offsets + rowi + 1);
}
}
if (utils::any(is_unassigned))
{
if (is_unassigned) // Unaggregated row
{
if (ALGORITHM == ALGORITHM_STOREWEIGHTS)
{
partner = partner_index[rowi];
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS_2)
{
partner0 = partner_index[rowi];
partner1 = partner_index[num_block_rows + rowi];
partner2 = partner_index[2 * num_block_rows + rowi];
}
}
int jj = jmin - amgx::strided_reduction::warp_loader<int, NUM_COLS>::align_shift(jmin);
for (; utils::any(jj < jmax && jmax >= 0); jj += NUM_COLS)
{
int I[NUM_COLS];
float W[NUM_COLS];
int P[NUM_COLS];
int jj_ok = (jj >= 0 && jj < jmax && jmax >= 0) ? jj : 0;
amgx::strided_reduction::warp_loader<int, NUM_COLS>::load(column_indices, jj_ok, num_nonzero, I);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
int j = jj + i;
jcol = I[i];
if (j >= jmin && j < jmax)
{
if (ALGORITHM == ALGORITHM_NOMERGE)
{
P[i] = __load_nc(partner_index + jcol); //make this load ASAP
}
else
{
P[i] = __load_nc(aggregated + jcol);
}
}
}
amgx::strided_reduction::warp_loader<float, NUM_COLS>::load(edge_weights, jj_ok, num_nonzero, W);
#pragma unroll
for (int i = 0; i < NUM_COLS; i++)
{
weight = W[i];
jcol = I[i];
int j = jj + i;
if (j >= jmin && j < jmax)
{
if (ALGORITHM == ALGORITHM_NOMERGE)
{
if (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated)) // unaggregated
{
if (rowi != jcol && P[i] == -1)
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
}
}
else
{
bool partner_condition;
if (ALGORITHM == ALGORITHM_STOREWEIGHTS)
{
partner_condition = jcol != partner;
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS_2)
{
partner_condition = jcol != partner0 && jcol != partner1 && jcol != partner2;
}
agg_jcol = P[i];
if (partner_condition && rowi != jcol)
{
if (agg_jcol == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
else if (agg_jcol != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // unaggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
}
}
}
}
if (valid_tid && is_unassigned)
{
if (ALGORITHM == ALGORITHM_NOMERGE)
{
if (strongest_unaggregated == -1) // All neighbours are aggregated
{
// Put in its own aggregate
if (!deterministic)
{
partner_index[rowi] = rowi;
}
}
else
{
strongest_neighbour[rowi] = strongest_unaggregated;
}
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS)
{
if (strongest_unaggregated == -1) // All neighbours are aggregated
{
if (!deterministic)
{
if (strongest_aggregated != -1)
{
aggregates[tid] = aggregates[strongest_aggregated];
aggregated[tid] = 1;
aggregates[partner] = aggregates[strongest_aggregated];
aggregated[partner] = 1;
}
else // leave in its own aggregate
{
aggregated[partner] = 1;
aggregated[tid] = 1;
}
}
}
else // Found an unaggregated aggregate
{
weight_strongest_neighbour[tid] = max_weight_unaggregated;
strongest_neighbour[tid] = aggregates[strongest_unaggregated];
}
}
else if (ALGORITHM == ALGORITHM_STOREWEIGHTS_2)
{
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // all neighbours are aggregated, store the strongest aggregated
{
weight_strongest_neighbour[tid] = -max_weight_aggregated;
strongest_neighbour[tid] = aggregates[strongest_aggregated];
}
else if (strongest_unaggregated != -1)
{
weight_strongest_neighbour[tid] = max_weight_unaggregated;
strongest_neighbour[tid] = aggregates[strongest_unaggregated];
}
}
}
}
bid += gridDim.x;
}
}
#define INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(numcols, algo,assume,c) template __global__ void my_findStrongestNeighbourBlockDiaCsr_NoMerge<numcols,algo,assume,c>(\
const int *row_offsets, const int *column_indices,\
const float *edge_weights, const int num_block_rows,const int num_nonzero,\
int *aggregated, int *aggregates, int *strongest_neighbour,\
int *partner_index, float *weight_strongest_neighbour, int deterministic,\
const int* n_unassigned_per_block, const int * unassigned_per_block);
#define INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(numcols) template __global__ void my_findStrongestNeighbourBlockDiaCsr_NoMergeClean<numcols,int>(\
const int *row_offsets, const int *column_indices,\
const float *edge_weights, const int num_block_rows,const int num_nonzero,\
int *aggregated, int *aggregates, int *strongest_neighbour,\
int *partner_index, float *weight_strongest_neighbour, int deterministic,\
const int *unassigned_rows,\
const int num_unassigned_row);
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_NOMERGE, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS_2, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_NOMERGE, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_STOREWEIGHTS_2, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(1, ALGORITHM_NOMERGE, 0, 1) //load only unassigned
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_NOMERGE, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS_2, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_NOMERGE, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_STOREWEIGHTS_2, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(2, ALGORITHM_NOMERGE, 0, 1) //load only unassigned
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_NOMERGE, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS_2, 0, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_NOMERGE, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_STOREWEIGHTS_2, 1, 0)
INST_my_findStrongestNeighbourBlockDiaCsr_NoMerge(4, ALGORITHM_NOMERGE, 0, 1) //load only unassigned
INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(1)
INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(2)
INSTmy_findStrongestNeighbourBlockDiaCsr_NoMergeClean(4)
#define __load_ __load_streaming
template<int ALREADY_COMPACT>
__global__ void my_blockCompact(
int *partner_index, const int num_rows,
int *unassigned_per_block_in,
int *n_unassigned_per_block, int *unassigned_per_block)
{
int bid = blockIdx.x; //RMV
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (; bid < num_rows / 256 + 1/*__any(tid < num_rows)*/; tid += gridDim.x * blockDim.x)
{
int row = tid;
bool no_partner = 0; //RMV
if (tid < num_rows)
{
if (ALREADY_COMPACT)
{
row = unassigned_per_block_in[tid];
}
if (partner_index[row] == -1) // Unaggregated row
{
no_partner = 1;
}
}
amgx::strided_reduction::block_binary_compaction<256, 32, 1>(
n_unassigned_per_block, unassigned_per_block, bid,
no_partner, row);
bid += gridDim.x;
}
}
template __global__ void my_blockCompact<0>(
int *partner_index, const int num_rows,
int *unassigned_per_block_in,
int *n_unassigned_per_block, int *unassigned_per_block);
template __global__ void my_blockCompact<1>(
int *partner_index, const int num_rows,
int *unassigned_per_block_in,
int *n_unassigned_per_block, int *unassigned_per_block);
__global__ void my_MatchEdgesWithCompaction(const int num_rows, int *partner_index, int *aggregates, const int *strongest_neighbour, int *sets_per_block,
int *unassigned_per_block_in, int *n_unassigned_per_block, int *unassigned_per_block
)
{
int potential_match, potential_match_neighbour;
int warp_count = 0;
int bid = blockIdx.x; //RMV
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (; bid < num_rows / 1024 + 1/*__any(tid < num_rows)*/; tid += gridDim.x * blockDim.x)
{
int row = tid;
bool no_partner = 0;
if (tid < num_rows)
{
if (partner_index[row] == -1) // Unaggregated row
{
no_partner = 1;
potential_match = strongest_neighbour[row];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match);
if ( potential_match_neighbour == row ) // we have a match
{
no_partner = 0;
//partner_notnull = 1;//RMV
partner_index[row] = potential_match;
aggregates[row] = ( potential_match > row ) ? row : potential_match;
}
}
}
}
amgx::strided_reduction::block_binary_compaction<1024, 32, 1>(
n_unassigned_per_block, unassigned_per_block, bid,
no_partner, row);
warp_count += amgx::strided_reduction::warp_binary_count(no_partner);
bid += gridDim.x;
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
__global__ void my_MatchEdges(const int num_rows, int *partner_index, int *aggregates, const int *strongest_neighbour, int *sets_per_block)
{
int potential_match, potential_match_neighbour;
int warp_count = 0;
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; utils::any(tid < num_rows); tid += gridDim.x * blockDim.x)
{
bool has_set_partner_index = 0;
if (tid < num_rows)
{
if (partner_index[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match);
if ( potential_match_neighbour == tid ) // we have a match
{
has_set_partner_index = 1;
partner_index[tid] = potential_match;
aggregates[tid] = ( potential_match > tid) ? tid : potential_match;
}
}
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
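// Hedged launch sketch (not part of the original file): my_MatchEdges is a grid-stride kernel
// that accumulates a per-block count of newly matched rows into sets_per_block. A placeholder
// launch, chosen to match the 1024-thread block_count instantiation inside the kernel, could
// look like:
//   const int threads = 1024;
//   const int blocks  = (num_rows + threads - 1) / threads;  // sets_per_block needs >= blocks ints
//   my_MatchEdges<<<blocks, threads>>>(num_rows, partner_index, aggregates,
//                                      strongest_neighbour, sets_per_block);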
// matchEdges
__global__ void my_joinExistingAggregates(int num_rows, int *aggregates, int *aggregated, int *aggregates_candidate, int *sets_per_block)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int warp_count = 0;
while (utils::any(tid < num_rows))
{
bool has_set_partner_index = 0;
if (tid < num_rows)
{
if (aggregated[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row
{
aggregates[tid] = aggregates_candidate[tid];
aggregated[tid] = 1;
has_set_partner_index = 1;
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
tid += gridDim.x * blockDim.x;
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
// Kernel that checks if perfect matches exist
__global__ void my_matchAggregates(int *aggregates, int *aggregated, int *strongest_neighbour, const int num_rows, int *sets_per_block)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int potential_match, potential_match_neighbour, my_aggregate;
int warp_count = 0;
while (utils::any(tid < num_rows))
{
bool has_set_partner_index = 0;
if (tid < num_rows) if (aggregated[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match); //or global
my_aggregate = aggregates[tid];
if (potential_match_neighbour == my_aggregate) // we have a match
{
has_set_partner_index = 1;
aggregated[tid] = 1;
aggregates[tid] = ( potential_match > my_aggregate) ? my_aggregate : potential_match;
}
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
tid += gridDim.x * blockDim.x;
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
// Kernel that checks if perfect matches exist
__global__ void my_matchAggregatesSize4(int *aggregates, int *aggregated, int *strongest_neighbour, int *partner_index, const int num_rows, int *sets_per_block)
{
int potential_match, potential_match_neighbour, my_aggregate;
int warp_count = 0;
for (int tid = threadIdx.x + blockDim.x * blockIdx.x; utils::any(tid < num_rows); tid += blockDim.x * gridDim.x)
{
bool has_set_partner_index = 0;
if (tid < num_rows) if (aggregated[tid] == -1) // Unaggregated row
{
potential_match = strongest_neighbour[tid];
if (potential_match != -1)
{
potential_match_neighbour = __load_(strongest_neighbour + potential_match);
my_aggregate = aggregates[tid];
if (potential_match_neighbour == my_aggregate) // we have a match
{
has_set_partner_index = 1;
aggregated[tid] = 1;
aggregates[tid] = ( potential_match > my_aggregate) ? my_aggregate : potential_match;
partner_index[tid + num_rows] = potential_match;
partner_index[tid + 2 * num_rows] = partner_index[potential_match];
}
}
}
warp_count += amgx::strided_reduction::warp_binary_count(has_set_partner_index);
}
amgx::strided_reduction::block_count<1, 1024, 32, int>(warp_count, sets_per_block);
}
}
}
}
|
the_stack
|
#include <assert.h>
#include <stdint.h>
#include "rxmesh/context.h"
#include "rxmesh/local.h"
#include "rxmesh/types.h"
namespace rxmesh {
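/**
 * @brief cooperatively copy an array of uint16_t across the block using 32-bit
 * loads/stores (two uint16_t at a time); thread 0 copies the odd trailing element,
 * if any
 * @param in input array
 * @param size number of uint16_t elements to copy
 * @param out output array
 */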
template <uint32_t blockThreads>
__device__ __forceinline__ void load_uint16(const uint16_t* in,
const uint16_t size,
uint16_t* out)
{
const uint32_t size32 = size / 2;
const uint32_t reminder = size % 2;
const uint32_t* in32 = reinterpret_cast<const uint32_t*>(in);
uint32_t* out32 = reinterpret_cast<uint32_t*>(out);
for (uint32_t i = threadIdx.x; i < size32; i += blockThreads) {
uint32_t a = in32[i];
out32[i] = a;
}
if (reminder != 0) {
if (threadIdx.x == 0) {
out[size - 1] = in[size - 1];
}
}
}
/**
* @brief load the patch FE
* @param patch_info input patch info
 * @param fe output FE
* @return
*/
template <uint32_t blockThreads>
__device__ __forceinline__ void load_patch_FE(const PatchInfo& patch_info,
LocalEdgeT* fe)
{
load_uint16<blockThreads>(reinterpret_cast<const uint16_t*>(patch_info.fe),
patch_info.num_faces * 3,
reinterpret_cast<uint16_t*>(fe));
}
/**
* @brief load the patch EV
* @param patch_info input patch info
* @param ev output EV
* @return
*/
template <uint32_t blockThreads>
__device__ __forceinline__ void load_patch_EV(const PatchInfo& patch_info,
LocalVertexT* ev)
{
const uint32_t num_edges = patch_info.num_edges;
const uint32_t* input_ev32 =
reinterpret_cast<const uint32_t*>(patch_info.ev);
uint32_t* output_ev32 = reinterpret_cast<uint32_t*>(ev);
#pragma unroll 2
for (uint32_t i = threadIdx.x; i < num_edges; i += blockThreads) {
uint32_t a = input_ev32[i];
output_ev32[i] = a;
}
}
/**
* @brief load the patch topology i.e., EV and FE
* @param patch_info input patch info
* @param load_ev input indicates if we should load EV
* @param load_fe input indicates if we should load FE
* @param s_ev where EV will be loaded
* @param s_fe where FE will be loaded
* @return
*/
template <uint32_t blockThreads>
__device__ __forceinline__ void load_mesh(const PatchInfo& patch_info,
const bool load_ev,
const bool load_fe,
LocalVertexT*& s_ev,
LocalEdgeT*& s_fe)
{
if (load_ev) {
load_patch_EV<blockThreads>(patch_info, s_ev);
}
// load patch faces
if (load_fe) {
if (load_ev) {
            // if we loaded the edges, advance s_fe past the EV block
            // so that loading FE does not overwrite it
s_fe =
reinterpret_cast<LocalEdgeT*>(&s_ev[patch_info.num_edges * 2]);
}
load_patch_FE<blockThreads>(patch_info, s_fe);
}
}
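// --- Illustrative sketch (not part of the original file) --------------------
// A hypothetical caller showing the shared-memory layout load_mesh() assumes:
// s_ev and s_fe initially alias one dynamic shared-memory buffer, and
// load_mesh() advances s_fe past the 2*num_edges EV entries when both
// connectivities are requested. The kernel name and its parameter are
// assumptions made for this sketch; the launcher must provide at least
// (2*num_edges + 3*num_faces) * sizeof(uint16_t) bytes of dynamic shared
// memory.
template <uint32_t blockThreads>
__global__ void load_mesh_example(const PatchInfo* patches)
{
    extern __shared__ uint16_t shrd_mem[];
    const PatchInfo& patch_info = patches[blockIdx.x];
    LocalVertexT*    s_ev = reinterpret_cast<LocalVertexT*>(shrd_mem);
    LocalEdgeT*      s_fe = reinterpret_cast<LocalEdgeT*>(shrd_mem);
    load_mesh<blockThreads>(patch_info, true, true, s_ev, s_fe);
    __syncthreads();
    // s_ev now holds 2*num_edges entries and s_fe holds 3*num_faces entries
}
// ----------------------------------------------------------------------------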
template <uint32_t blockThreads>
__device__ __forceinline__ void load_not_owned_local_id(
const uint16_t num_not_owned,
uint16_t* output_not_owned_local_id,
const uint16_t* input_not_owned_local_id)
{
load_uint16<blockThreads>(
input_not_owned_local_id, num_not_owned, output_not_owned_local_id);
}
template <uint32_t blockThreads>
__device__ __forceinline__ void load_not_owned_patch(
const uint16_t num_not_owned,
uint32_t* output_not_owned_patch,
const uint32_t* input_not_owned_patch)
{
for (uint32_t i = threadIdx.x; i < num_not_owned; i += blockThreads) {
output_not_owned_patch[i] = input_not_owned_patch[i];
}
}
/**
 * @brief Load local id and patch of the not-owned vertices, edges, or faces
* based on query op.
* @param patch_info input patch info
* @param not_owned_local_id output local id
* @param not_owned_patch output patch id
 * @param num_owned output number of owned mesh elements
*/
template <Op op, uint32_t blockThreads>
__device__ __forceinline__ void load_not_owned(const PatchInfo& patch_info,
uint16_t*& not_owned_local_id,
uint32_t*& not_owned_patch,
uint16_t& num_owned)
{
uint32_t num_not_owned = 0;
switch (op) {
case Op::VV: {
num_owned = patch_info.num_owned_vertices;
num_not_owned = patch_info.num_vertices - num_owned;
// should be 4*patch_info.num_edges but VV (offset and values) are
// stored as uint16_t and not_owned_patch is uint32_t* so we need to
// shift the pointer only by half this amount
not_owned_patch = not_owned_patch + 2 * patch_info.num_edges;
not_owned_local_id =
reinterpret_cast<uint16_t*>(not_owned_patch + num_not_owned);
load_not_owned_patch<blockThreads>(
num_not_owned, not_owned_patch, patch_info.not_owned_patch_v);
load_not_owned_local_id<blockThreads>(
num_not_owned,
not_owned_local_id,
reinterpret_cast<uint16_t*>(patch_info.not_owned_id_v));
break;
}
case Op::VE: {
num_owned = patch_info.num_owned_edges;
num_not_owned = patch_info.num_edges - num_owned;
// should be 4*patch_info.num_edges but VE (offset and values) are
// stored as uint16_t and not_owned_patch is uint32_t* so we need to
// shift the pointer only by half this amount
not_owned_patch = not_owned_patch + 2 * patch_info.num_edges;
not_owned_local_id =
reinterpret_cast<uint16_t*>(not_owned_patch + num_not_owned);
load_not_owned_patch<blockThreads>(
num_not_owned, not_owned_patch, patch_info.not_owned_patch_e);
load_not_owned_local_id<blockThreads>(
num_not_owned,
not_owned_local_id,
reinterpret_cast<uint16_t*>(patch_info.not_owned_id_e));
break;
}
case Op::VF: {
num_owned = patch_info.num_owned_faces;
num_not_owned = patch_info.num_faces - num_owned;
uint32_t shift = DIVIDE_UP(
3 * patch_info.num_faces + std::max(3 * patch_info.num_faces,
2 * patch_info.num_edges),
2);
not_owned_patch = not_owned_patch + shift;
not_owned_local_id =
reinterpret_cast<uint16_t*>(not_owned_patch + num_not_owned);
load_not_owned_patch<blockThreads>(
num_not_owned, not_owned_patch, patch_info.not_owned_patch_f);
load_not_owned_local_id<blockThreads>(
num_not_owned,
not_owned_local_id,
reinterpret_cast<uint16_t*>(patch_info.not_owned_id_f));
break;
}
case Op::FV: {
num_owned = patch_info.num_owned_vertices;
num_not_owned = patch_info.num_vertices - num_owned;
assert(2 * patch_info.num_edges >= (1 + 2) * num_not_owned);
not_owned_local_id =
reinterpret_cast<uint16_t*>(not_owned_patch + num_not_owned);
load_not_owned_patch<blockThreads>(
num_not_owned, not_owned_patch, patch_info.not_owned_patch_v);
load_not_owned_local_id<blockThreads>(
num_not_owned,
not_owned_local_id,
reinterpret_cast<uint16_t*>(patch_info.not_owned_id_v));
break;
}
case Op::FE: {
num_owned = patch_info.num_owned_edges;
num_not_owned = patch_info.num_edges - num_owned;
// should be 3*patch_info.num_faces but FE is stored as uint16_t and
// not_owned_patch is uint32_t* so we need to shift the pointer only
// by half this amount
not_owned_patch =
not_owned_patch + DIVIDE_UP(3 * patch_info.num_faces, 2);
not_owned_local_id =
reinterpret_cast<uint16_t*>(not_owned_patch + num_not_owned);
load_not_owned_patch<blockThreads>(
num_not_owned, not_owned_patch, patch_info.not_owned_patch_e);
load_not_owned_local_id<blockThreads>(
num_not_owned,
not_owned_local_id,
reinterpret_cast<uint16_t*>(patch_info.not_owned_id_e));
break;
}
case Op::FF: {
num_owned = patch_info.num_owned_faces;
num_not_owned = patch_info.num_faces - num_owned;
not_owned_local_id =
reinterpret_cast<uint16_t*>(not_owned_patch + num_not_owned);
load_not_owned_patch<blockThreads>(
num_not_owned, not_owned_patch, patch_info.not_owned_patch_f);
load_not_owned_local_id<blockThreads>(
num_not_owned,
not_owned_local_id,
reinterpret_cast<uint16_t*>(patch_info.not_owned_id_f));
break;
}
case Op::EV: {
num_owned = patch_info.num_owned_vertices;
num_not_owned = patch_info.num_vertices - num_owned;
// should be 2*patch_info.num_edges but EV is stored as uint16_t and
// not_owned_patch is uint32_t* so we need to shift the pointer only
// by num_edges
not_owned_patch = not_owned_patch + patch_info.num_edges;
not_owned_local_id =
reinterpret_cast<uint16_t*>(not_owned_patch + num_not_owned);
load_not_owned_patch<blockThreads>(
num_not_owned, not_owned_patch, patch_info.not_owned_patch_v);
load_not_owned_local_id<blockThreads>(
num_not_owned,
not_owned_local_id,
reinterpret_cast<uint16_t*>(patch_info.not_owned_id_v));
break;
}
case Op::EF: {
num_owned = patch_info.num_owned_faces;
num_not_owned = patch_info.num_faces - num_owned;
// should be 6*patch_info.num_faces but EF (offset and values) are
// stored as uint16_t and not_owned_patch is uint32_t* so we need to
// shift the pointer only by half this amount
not_owned_patch = not_owned_patch + 3 * patch_info.num_faces;
not_owned_local_id =
reinterpret_cast<uint16_t*>(not_owned_patch + num_not_owned);
load_not_owned_patch<blockThreads>(
num_not_owned, not_owned_patch, patch_info.not_owned_patch_f);
load_not_owned_local_id<blockThreads>(
num_not_owned,
not_owned_local_id,
reinterpret_cast<uint16_t*>(patch_info.not_owned_id_f));
break;
}
default: {
assert(1 != 1);
break;
}
}
}
} // namespace rxmesh
|
the_stack
|
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <cub/device/device_segmented_radix_sort.cuh>
#include <cub/device/device_radix_sort.cuh>
#include <cub/block/block_radix_sort.cuh>
#include "implicit/gpu/utils.cuh"
#include "implicit/gpu/knn.h"
#include "implicit/gpu/device_buffer.h"
namespace implicit { namespace gpu {
bool is_host_memory(void * address) {
cudaPointerAttributes attr;
auto err = cudaPointerGetAttributes(&attr, address);
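    // older CUDA versions return cudaErrorInvalidValue for plain pageable host pointers,
    // while newer ones report cudaMemoryTypeUnregistered instead; hence both checks below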
if (err == cudaErrorInvalidValue) {
return true;
}
#if __CUDACC_VER_MAJOR__ >= 10
return attr.type == cudaMemoryTypeHost || attr.type == cudaMemoryTypeUnregistered;
#else
return attr.memoryType == cudaMemoryTypeHost || attr.memoryType == cudaMemoryTypeUnregistered;
#endif
}
class StackAllocator {
public:
StackAllocator(size_t bytes) : memory(bytes), allocated(0) {}
void * allocate(size_t bytes) {
size_t padding = bytes % 128;
if (padding) {
bytes += 128 - padding;
}
if (allocated + bytes >= memory.size()) {
throw std::invalid_argument("stack allocator: out of memory");
}
allocations.push_back(bytes);
void * ret = memory.get() + allocated;
allocated += bytes;
return ret;
}
void deallocate(void * ptr) {
size_t bytes = allocations.back();
if (ptr != memory.get() + allocated - bytes) {
throw std::invalid_argument("stack allocator: free called out of order");
}
allocations.pop_back();
allocated -= bytes;
}
protected:
std::vector<size_t> allocations;
DeviceBuffer<char> memory;
size_t allocated;
};
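// --- Illustrative sketch (not part of the original file) --------------------
// The StackAllocator pads every request to a multiple of 128 bytes and hands
// out consecutive slices of a single DeviceBuffer, so frees must happen in
// strict LIFO order. This hypothetical helper only demonstrates that contract.
void stack_allocator_example() {
    StackAllocator alloc(1 << 20);      // 1 MiB arena
    void * a = alloc.allocate(1000);    // padded up to 1024 bytes
    void * b = alloc.allocate(4096);    // already a multiple of 128
    alloc.deallocate(b);                // must release b before a
    alloc.deallocate(a);
}
// ----------------------------------------------------------------------------
// copy_columns keeps only the first output_cols entries of each row; it is used
// below to truncate a full argsort down to its top-k columns.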
template <typename T>
void copy_columns(const T * input, int rows, int cols, T * output, int output_cols) {
auto count = thrust::make_counting_iterator<int>(0);
thrust::for_each(count, count + (rows * output_cols),
[=] __device__(int i) {
int col = i % output_cols;
int row = i / output_cols;
output[col + row * output_cols] = input[col + row * cols];
});
}
KnnQuery::KnnQuery(size_t temp_memory)
: max_temp_memory(temp_memory),
alloc(new StackAllocator(temp_memory)) {
CHECK_CUBLAS(cublasCreate(&blas_handle));
}
const static int MAX_SELECT_K = 128;
void KnnQuery::topk(const Matrix & items, const Matrix & query, int k,
int * indices, float * distances, float * item_norms,
const COOMatrix * query_filter,
Vector<int> * item_filter) {
if (query.cols != items.cols) {
throw std::invalid_argument("Must have same number of columns in each matrix for topk");
}
size_t available_temp_memory = max_temp_memory;
float * host_distances = NULL;
size_t distances_size = query.rows * k * sizeof(float);
if (is_host_memory(distances)) {
host_distances = distances;
distances = reinterpret_cast<float *>(alloc->allocate(distances_size));
available_temp_memory -= distances_size;
}
int * host_indices = NULL;
size_t indices_size = query.rows * k * sizeof(int);
if (is_host_memory(indices)) {
host_indices = indices;
indices = reinterpret_cast<int *>(alloc->allocate(indices_size));
available_temp_memory -= indices_size;
}
// We need 6 copies of the matrix for argsort code - and then some
// extra memory per SM as well.
int batch_size = available_temp_memory / (sizeof(float) * items.rows);
if (k >= MAX_SELECT_K) {
batch_size *= 0.15;
} else {
batch_size *= 0.5;
}
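    // the fractions above are heuristics: the full-argsort path (k >= MAX_SELECT_K) needs
    // several extra temporary copies of the batch, so it gets a smaller share of the memory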
batch_size = std::min(batch_size, query.rows);
batch_size = std::max(batch_size, 1);
// Create temporary memory for storing results
void * temp_mem = alloc->allocate(batch_size * items.rows * sizeof(float));
Matrix temp_distances(batch_size, items.rows, reinterpret_cast<float *>(temp_mem), false);
for (int start = 0; start < query.rows; start += batch_size) {
auto end = std::min(query.rows, start + batch_size);
Matrix batch(query, start, end);
temp_distances.rows = batch.rows;
// matrix multiple the items by the batch, store in distances
float alpha = 1.0, beta = 0.;
CHECK_CUBLAS(cublasSgemm(blas_handle, CUBLAS_OP_T, CUBLAS_OP_N,
items.rows, batch.rows, items.cols,
&alpha,
items.data, items.cols,
batch.data, batch.cols,
&beta,
temp_distances.data, temp_distances.cols));
// If we have norms (cosine distance etc) normalize the results here
if (item_norms != NULL) {
auto count = thrust::make_counting_iterator<int>(0);
int cols = temp_distances.cols;
float * data = temp_distances.data;
thrust::for_each(count, count + (temp_distances.rows * temp_distances.cols),
[=] __device__(int i) {
data[i] /= item_norms[i % cols];
});
}
if (item_filter != NULL) {
auto count = thrust::make_counting_iterator<int>(0);
float * data = temp_distances.data;
int * items = item_filter->data;
int items_size = item_filter->size;
int cols = temp_distances.cols;
thrust::for_each(count, count + items_size * temp_distances.rows,
[=] __device__(int i) {
int col = items[i % items_size];
int row = i / items_size;
data[row * cols + col] = -FLT_MAX;
});
}
if (query_filter != NULL) {
auto count = thrust::make_counting_iterator<int>(0);
int * row = query_filter->row;
int * col = query_filter->col;
float * data = temp_distances.data;
int items = temp_distances.cols;
thrust::for_each(count, count + query_filter->nonzeros,
[=] __device__(int i) {
if ((row[i] >= start) && (row[i] < end)) {
data[(row[i] -start) * items + col[i]] = -FLT_MAX;
}
});
}
argpartition(temp_distances, k, indices + start * k, distances + start * k);
// TODO: callback per batch (show progress etc)
}
alloc->deallocate(temp_mem);
if (host_indices) {
CHECK_CUDA(cudaMemcpy(host_indices, indices, indices_size, cudaMemcpyDeviceToHost));
alloc->deallocate(indices);
}
if (host_distances) {
CHECK_CUDA(cudaMemcpy(host_distances, distances, distances_size, cudaMemcpyDeviceToHost));
alloc->deallocate(distances);
}
}
static const int ARGPARTITION_BLOCK_DIM_X = 128;
static const int ARGPARTITION_ITEMS_PER_THREAD = 16;
static const int ARGPARTITION_SORT_SIZE = ARGPARTITION_BLOCK_DIM_X * ARGPARTITION_ITEMS_PER_THREAD;
__global__ void argpartition_kernel(const int * indices, const float * distances,
int rows, int cols, int k,
int * out_indices,
float * out_distances) {
using BlockRadixSort = cub::BlockRadixSort<float, ARGPARTITION_BLOCK_DIM_X, ARGPARTITION_ITEMS_PER_THREAD, int>;
__shared__ typename BlockRadixSort::TempStorage shared_mem;
float keys[ARGPARTITION_ITEMS_PER_THREAD];
int values[ARGPARTITION_ITEMS_PER_THREAD];
int rowid = blockIdx.y;
for (int i = 0; i < ARGPARTITION_ITEMS_PER_THREAD; i++) {
int colid = blockIdx.x * blockDim.x + threadIdx.x + i * (blockDim.x * gridDim.x);
if (colid < cols) {
keys[i] = distances[rowid * cols + colid];
values[i] = indices == NULL ? colid : indices[rowid * cols + colid];
} else {
keys[i] = -FLT_MAX;
values[i] = -1;
}
}
BlockRadixSort(shared_mem).SortDescendingBlockedToStriped(keys, values);
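    // after the descending blocked-to-striped sort, thread t's keys[0]/values[0] hold the
    // element of rank t within this tile, so the first k threads emit the tile's top-k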
if (threadIdx.x < k) {
int out_col = threadIdx.x + blockIdx.x * k;
out_distances[out_col + rowid * k * gridDim.x] = keys[0];
out_indices[out_col + rowid * k * gridDim.x] = values[0];
}
}
void KnnQuery::argpartition(const Matrix & items, int k, int * indices, float * distances) {
k = std::min(k, items.cols);
if (k >= MAX_SELECT_K) {
int * temp_indices = reinterpret_cast<int *>(alloc->allocate(items.rows * items.cols * sizeof(int)));
float * temp_distances = reinterpret_cast<float *>(alloc->allocate(items.rows * items.cols * sizeof(float)));
argsort(items, temp_indices, temp_distances);
copy_columns(temp_distances, items.rows, items.cols, distances, k);
copy_columns(temp_indices, items.rows, items.cols, indices, k);
alloc->deallocate(temp_distances);
alloc->deallocate(temp_indices);
return;
}
int rows = items.rows;
int cols = items.cols;
int blocks_per_row = (cols + ARGPARTITION_SORT_SIZE - 1) / ARGPARTITION_SORT_SIZE;
// maintain a double buffer of input/output indices and distances
float * distA = reinterpret_cast<float *>(alloc->allocate(rows * k * blocks_per_row * sizeof(float)));
int * indA = reinterpret_cast<int *>(alloc->allocate(rows * k * blocks_per_row * sizeof(int)));
blocks_per_row = (blocks_per_row * k + ARGPARTITION_SORT_SIZE - 1) / ARGPARTITION_SORT_SIZE;
float * distB = reinterpret_cast<float *>(alloc->allocate(rows * k * blocks_per_row * sizeof(float)));
int * indB = reinterpret_cast<int *>(alloc->allocate(rows * k * blocks_per_row * sizeof(int)));
const float * input_distances = items.data;
const int * input_indices = NULL;
float * output_distances = distA;
int * output_indices = indA;
bool outputA = true;
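    // each pass sorts tiles of ARGPARTITION_SORT_SIZE columns and keeps the top k of
    // every tile, shrinking the column count to blocks_per_row * k until one tile remains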
while (true) {
int blocks_per_row = (cols + ARGPARTITION_SORT_SIZE - 1) / ARGPARTITION_SORT_SIZE;
dim3 block_count(blocks_per_row, items.rows, 1);
bool final = block_count.x <= 1;
if (final) {
output_distances = distances;
output_indices = indices;
}
argpartition_kernel<<<block_count, ARGPARTITION_BLOCK_DIM_X>>>(
input_indices, input_distances,
rows, cols, k,
output_indices, output_distances);
if (final) break;
        // the next pass only needs to process the columns produced by this pass
        // (k entries per block per row)
cols = block_count.x * k;
// set the input of the next run to the output of the current run
// (and the output to an unused block of memory)
input_distances = output_distances;
input_indices = output_indices;
output_distances = outputA ? distB : distA;
output_indices = outputA ? indB : indA;
outputA = !outputA;
}
CHECK_CUDA(cudaDeviceSynchronize());
// Free up temp memory
alloc->deallocate(indB);
alloc->deallocate(distB);
alloc->deallocate(indA);
alloc->deallocate(distA);
}
void KnnQuery::argsort(const Matrix & items, int * indices, float * distances) {
    // We apparently can't run the cub sort in place (https://github.com/NVIDIA/cub/issues/238),
    // so the input column indices are generated in temporary memory first
auto temp_indices = reinterpret_cast<int *>(alloc->allocate(items.rows * items.cols * sizeof(int)));
thrust::transform(
thrust::make_counting_iterator<int>(0),
thrust::make_counting_iterator<int>(items.rows * items.cols),
thrust::make_constant_iterator<int>(items.cols),
thrust::device_pointer_cast(temp_indices),
thrust::modulus<int>());
int cols = items.cols;
auto segment_offsets = thrust::make_transform_iterator(thrust::make_counting_iterator<int>(0),
[=] __device__(int i) {
return i * cols;
});
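    // segment i covers columns [i*cols, (i+1)*cols), i.e. one matrix row per segment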
void * temp_mem = NULL;
// sort the values.
if (items.rows > 1) {
size_t temp_size = 0;
auto err = cub::DeviceSegmentedRadixSort::SortPairsDescending(NULL,
temp_size,
items.data,
distances,
temp_indices,
indices,
items.rows * items.cols,
items.rows,
segment_offsets,
segment_offsets + 1);
CHECK_CUDA(err);
temp_mem = alloc->allocate(temp_size);
err = cub::DeviceSegmentedRadixSort::SortPairsDescending(temp_mem,
temp_size,
items.data,
distances,
temp_indices,
indices,
items.rows * items.cols,
items.rows,
segment_offsets,
segment_offsets + 1);
CHECK_CUDA(err);
} else {
size_t temp_size = 0;
auto err = cub::DeviceRadixSort::SortPairsDescending(NULL,
temp_size,
items.data,
distances,
temp_indices,
indices,
items.cols);
CHECK_CUDA(err);
temp_mem = alloc->allocate(temp_size);
err = cub::DeviceRadixSort::SortPairsDescending(temp_mem,
temp_size,
items.data,
distances,
temp_indices,
indices,
items.cols);
CHECK_CUDA(err);
}
alloc->deallocate(temp_mem);
alloc->deallocate(temp_indices);
}
KnnQuery::~KnnQuery() {
    // TODO: don't check this; there isn't anything we can do here anyway
CHECK_CUBLAS(cublasDestroy(blas_handle));
}
}} // namespace implicit::gpu
|
the_stack
|
#include "cupoch/geometry/boundingvolume.h"
#include "cupoch/geometry/graph.h"
#include "cupoch/geometry/lineset.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/geometry/distancetransform.h"
#include "cupoch/geometry/geometry_functor.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
#include "cupoch/visualization/shader/shader.h"
#include "cupoch/visualization/shader/simple_shader.h"
#include "cupoch/visualization/utility/color_map.h"
#include "cupoch/visualization/visualizer/render_option.h"
using namespace cupoch;
using namespace cupoch::visualization;
using namespace cupoch::visualization::glsl;
namespace {
// Vertex indices of 12 lines in a cuboid
__constant__ int cuboid_lines_vertex_indices[12][2] = {
{0, 1}, {0, 2}, {0, 4}, {3, 1}, {3, 2}, {3, 7},
{5, 1}, {5, 4}, {5, 7}, {6, 2}, {6, 4}, {6, 7},
};
template <int Dim>
struct copy_pointcloud_functor {
copy_pointcloud_functor(bool has_colors,
RenderOption::PointColorOption color_option,
const ViewControl &view)
: has_colors_(has_colors), color_option_(color_option), view_(view){};
const bool has_colors_;
const RenderOption::PointColorOption color_option_;
const ViewControl view_;
const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
const thrust::tuple<Eigen::Matrix<float, Dim, 1>, Eigen::Vector3f> &pt_cl);
__device__ Eigen::Vector4f GetColor(const Eigen::Vector3f& point,
const Eigen::Vector3f& color) const {
Eigen::Vector4f color_tmp;
color_tmp[3] = 1.0;
switch (color_option_) {
case RenderOption::PointColorOption::XCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetXPercentage(point(0)),
colormap_option_);
break;
case RenderOption::PointColorOption::YCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetYPercentage(point(1)),
colormap_option_);
break;
case RenderOption::PointColorOption::ZCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetZPercentage(point(2)),
colormap_option_);
break;
case RenderOption::PointColorOption::Color:
case RenderOption::PointColorOption::Default:
default:
if (has_colors_) {
color_tmp.head<3>() = color;
} else {
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetZPercentage(point(2)),
colormap_option_);
}
break;
}
return color_tmp;
}
};
template <>
__device__
thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> copy_pointcloud_functor<3>::operator()(
const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> &pt_cl) {
const Eigen::Vector3f &point = thrust::get<0>(pt_cl);
const Eigen::Vector3f &color = thrust::get<1>(pt_cl);
return thrust::make_tuple(point, GetColor(point, color));
}
template <>
__device__
thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> copy_pointcloud_functor<2>::operator()(
const thrust::tuple<Eigen::Vector2f, Eigen::Vector3f> &pt_cl) {
const Eigen::Vector3f point = (Eigen::Vector3f() << thrust::get<0>(pt_cl), 0.0).finished();
const Eigen::Vector3f &color = thrust::get<1>(pt_cl);
return thrust::make_tuple(point, GetColor(point, color));
}
struct copy_lineset_functor {
copy_lineset_functor(
const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords,
const Eigen::Vector3f *line_colors,
bool has_colors)
: line_coords_(line_coords),
line_colors_(line_colors),
has_colors_(has_colors){};
const thrust::pair<Eigen::Vector3f, Eigen::Vector3f> *line_coords_;
const Eigen::Vector3f *line_colors_;
const bool has_colors_;
const Eigen::Vector3f default_line_color_ = geometry::DEFAULT_LINE_COLOR;
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
size_t k) const {
int i = k / 2;
int j = k % 2;
Eigen::Vector4f color_tmp;
color_tmp[3] = 1.0;
color_tmp.head<3>() =
(has_colors_) ? line_colors_[i] : default_line_color_;
if (j == 0) {
return thrust::make_tuple(line_coords_[i].first, color_tmp);
} else {
return thrust::make_tuple(line_coords_[i].second, color_tmp);
}
}
};
template <int Dim>
struct line_coordinates_functor {
line_coordinates_functor(const Eigen::Matrix<float, Dim, 1> *points) : points_(points){};
const Eigen::Matrix<float, Dim, 1> *points_;
__device__ thrust::pair<Eigen::Vector3f, Eigen::Vector3f> operator()(
const Eigen::Vector2i &idxs) const;
};
template <>
__device__
thrust::pair<Eigen::Vector3f, Eigen::Vector3f> line_coordinates_functor<3>::operator()(
const Eigen::Vector2i &idxs) const {
return thrust::make_pair(points_[idxs[0]], points_[idxs[1]]);
}
template <>
__device__
thrust::pair<Eigen::Vector3f, Eigen::Vector3f> line_coordinates_functor<2>::operator()(
const Eigen::Vector2i &idxs) const {
const Eigen::Vector3f p1 = (Eigen::Vector3f() << points_[idxs[0]], 0.0).finished();
const Eigen::Vector3f p2 = (Eigen::Vector3f() << points_[idxs[1]], 0.0).finished();
return thrust::make_pair(p1, p2);
}
struct copy_trianglemesh_functor {
copy_trianglemesh_functor(const Eigen::Vector3f *vertices,
const int *triangles,
const Eigen::Vector3f *vertex_colors,
bool has_vertex_colors,
RenderOption::MeshColorOption color_option,
const Eigen::Vector3f &default_mesh_color,
const ViewControl &view)
: vertices_(vertices),
triangles_(triangles),
vertex_colors_(vertex_colors),
has_vertex_colors_(has_vertex_colors),
color_option_(color_option),
default_mesh_color_(default_mesh_color),
view_(view){};
const Eigen::Vector3f *vertices_;
const int *triangles_;
const Eigen::Vector3f *vertex_colors_;
const bool has_vertex_colors_;
const RenderOption::MeshColorOption color_option_;
const Eigen::Vector3f default_mesh_color_;
const ViewControl view_;
const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
size_t k) const {
size_t vi = triangles_[k];
const auto &vertex = vertices_[vi];
Eigen::Vector4f color_tmp;
color_tmp[3] = 1.0;
switch (color_option_) {
case RenderOption::MeshColorOption::XCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetXPercentage(vertex(0)),
colormap_option_);
break;
case RenderOption::MeshColorOption::YCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetYPercentage(vertex(1)),
colormap_option_);
break;
case RenderOption::MeshColorOption::ZCoordinate:
color_tmp.head<3>() = GetColorMapColor(
view_.GetBoundingBox().GetZPercentage(vertex(2)),
colormap_option_);
break;
case RenderOption::MeshColorOption::Color:
if (has_vertex_colors_) {
color_tmp.head<3>() = vertex_colors_[vi];
break;
}
case RenderOption::MeshColorOption::Default:
default:
color_tmp.head<3>() = default_mesh_color_;
break;
}
return thrust::make_tuple(vertex, color_tmp);
}
};
struct copy_voxelgrid_line_functor {
copy_voxelgrid_line_functor(const Eigen::Vector3f *vertices,
const geometry::Voxel *voxels,
bool has_colors,
RenderOption::MeshColorOption color_option,
const Eigen::Vector3f &default_mesh_color,
const ViewControl &view)
: vertices_(vertices),
voxels_(voxels),
has_colors_(has_colors),
color_option_(color_option),
default_mesh_color_(default_mesh_color),
view_(view){};
const Eigen::Vector3f *vertices_;
const geometry::Voxel *voxels_;
const bool has_colors_;
const RenderOption::MeshColorOption color_option_;
const Eigen::Vector3f default_mesh_color_;
const ViewControl view_;
const ColorMap::ColorMapOption colormap_option_ = GetGlobalColorMapOption();
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f> operator()(
size_t idx) const {
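        // each voxel contributes 12 edges with 2 endpoints; decompose the flat index
        // into (voxel i, edge j, endpoint k)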
int i = idx / (12 * 2);
int jk = idx % (12 * 2);
int j = jk / 2;
int k = jk % 2;
// Voxel color (applied to all points)
Eigen::Vector4f voxel_color;
voxel_color[3] = 1.0;
switch (color_option_) {
case RenderOption::MeshColorOption::XCoordinate:
voxel_color.head<3>() =
GetColorMapColor(view_.GetBoundingBox().GetXPercentage(
vertices_[i * 8](0)),
colormap_option_);
break;
case RenderOption::MeshColorOption::YCoordinate:
voxel_color.head<3>() =
GetColorMapColor(view_.GetBoundingBox().GetYPercentage(
vertices_[i * 8](1)),
colormap_option_);
break;
case RenderOption::MeshColorOption::ZCoordinate:
voxel_color.head<3>() =
GetColorMapColor(view_.GetBoundingBox().GetZPercentage(
vertices_[i * 8](2)),
colormap_option_);
break;
case RenderOption::MeshColorOption::Color:
if (has_colors_) {
voxel_color.head<3>() = voxels_[i].color_;
break;
}
case RenderOption::MeshColorOption::Default:
default:
voxel_color.head<3>() = default_mesh_color_;
break;
}
return thrust::make_tuple(
vertices_[i * 8 + cuboid_lines_vertex_indices[j][k]],
voxel_color);
}
};
struct copy_distance_voxel_functor {
copy_distance_voxel_functor(float voxel_size,
int resolution,
const Eigen::Vector3f& origin,
float distance_max)
: voxel_size_(voxel_size), resolution_(resolution),
origin_(origin), distance_max_(distance_max){};
const float voxel_size_;
const int resolution_;
const Eigen::Vector3f origin_;
const float distance_max_;
__device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>
operator()(const thrust::tuple<size_t, geometry::DistanceVoxel>& kv) const {
int idx = thrust::get<0>(kv);
geometry::DistanceVoxel v = thrust::get<1>(kv);
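        // voxels are stored x-major: idx = (x * resolution + y) * resolution + z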
int res2 = resolution_ * resolution_;
int x = idx / res2;
int yz = idx % res2;
int y = yz / resolution_;
int z = yz % resolution_;
// Voxel color (applied to all points)
Eigen::Vector4f voxel_color = Eigen::Vector4f::Ones();
int h_res = resolution_ / 2;
Eigen::Vector3f pt = (Eigen::Vector3i(x - h_res, y - h_res, z - h_res).cast<float>() + Eigen::Vector3f::Constant(0.5)) * voxel_size_ - origin_;
voxel_color[3] = 1.0 - min(v.distance_, distance_max_) / distance_max_;
return thrust::make_tuple(pt, voxel_color);
}
};
struct alpha_greater_functor {
__device__ bool operator() (const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& lhs,
const thrust::tuple<Eigen::Vector3f, Eigen::Vector4f>& rhs) const {
return thrust::get<1>(lhs)[3] > thrust::get<1>(rhs)[3];
}
};
} // namespace
bool SimpleShader::Compile() {
if (CompileShaders(simple_vertex_shader, NULL, simple_fragment_shader) ==
false) {
PrintShaderWarning("Compiling shaders failed.");
return false;
}
vertex_position_ = glGetAttribLocation(program_, "vertex_position");
vertex_color_ = glGetAttribLocation(program_, "vertex_color");
MVP_ = glGetUniformLocation(program_, "MVP");
return true;
}
void SimpleShader::Release() {
UnbindGeometry(true);
ReleaseProgram();
}
bool SimpleShader::BindGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
// If there is already geometry, we first unbind it.
// We use GL_STATIC_DRAW. When geometry changes, we clear buffers and
// rebind the geometry. Note that this approach is slow. If the geometry is
// changing per frame, consider implementing a new ShaderWrapper using
// GL_STREAM_DRAW, and replace InvalidateGeometry() with Buffer Object
// Streaming mechanisms.
UnbindGeometry();
// Prepare data to be passed to GPU
const size_t num_data_size = GetDataSize(geometry);
// Create buffers and bind the geometry
glGenBuffers(1, &vertex_position_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0,
GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0],
vertex_position_buffer_,
cudaGraphicsMapFlagsNone));
glGenBuffers(1, &vertex_color_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector4f), 0,
GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1],
vertex_color_buffer_,
cudaGraphicsMapFlagsNone));
Eigen::Vector3f *raw_points_ptr;
Eigen::Vector4f *raw_colors_ptr;
size_t n_bytes;
cudaSafeCall(cudaGraphicsMapResources(2, cuda_graphics_resources_));
cudaSafeCall(cudaGraphicsResourceGetMappedPointer(
(void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0]));
cudaSafeCall(cudaGraphicsResourceGetMappedPointer(
(void **)&raw_colors_ptr, &n_bytes, cuda_graphics_resources_[1]));
thrust::device_ptr<Eigen::Vector3f> dev_points_ptr =
thrust::device_pointer_cast(raw_points_ptr);
thrust::device_ptr<Eigen::Vector4f> dev_colors_ptr =
thrust::device_pointer_cast(raw_colors_ptr);
if (PrepareBinding(geometry, option, view, dev_points_ptr,
dev_colors_ptr) == false) {
PrintShaderWarning("Binding failed when preparing data.");
return false;
}
Unmap(2);
bound_ = true;
return true;
}
bool SimpleShader::RenderGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (PrepareRendering(geometry, option, view) == false) {
PrintShaderWarning("Rendering failed during preparation.");
return false;
}
glUseProgram(program_);
glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data());
glEnableVertexAttribArray(vertex_position_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(vertex_color_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_color_buffer_);
glVertexAttribPointer(vertex_color_, 4, GL_FLOAT, GL_FALSE, 0, NULL);
glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_);
glDisableVertexAttribArray(vertex_position_);
glDisableVertexAttribArray(vertex_color_);
return true;
}
void SimpleShader::UnbindGeometry(bool finalize) {
if (bound_) {
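        // when finalize is set (application shutdown) the interop resources are not
        // unregistered, presumably because the CUDA/GL contexts may already be torn down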
if (!finalize) {
cudaSafeCall(cudaGraphicsUnregisterResource(
cuda_graphics_resources_[0]));
cudaSafeCall(cudaGraphicsUnregisterResource(
cuda_graphics_resources_[1]));
}
glDeleteBuffers(1, &vertex_position_buffer_);
glDeleteBuffers(1, &vertex_color_buffer_);
bound_ = false;
}
}
bool SimpleShaderForPointCloud::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::PointCloud) {
PrintShaderWarning("Rendering type is not geometry::PointCloud.");
return false;
}
glPointSize(GLfloat(option.point_size_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleShaderForPointCloud::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::PointCloud) {
PrintShaderWarning("Rendering type is not geometry::PointCloud.");
return false;
}
const geometry::PointCloud &pointcloud =
(const geometry::PointCloud &)geometry;
if (pointcloud.HasPoints() == false) {
PrintShaderWarning("Binding failed with empty pointcloud.");
return false;
}
copy_pointcloud_functor<3> func(pointcloud.HasColors(),
option.point_color_option_, view);
if (pointcloud.HasColors()) {
thrust::transform(
make_tuple_begin(pointcloud.points_, pointcloud.colors_),
make_tuple_end(pointcloud.points_, pointcloud.colors_),
make_tuple_iterator(points, colors), func);
} else {
thrust::transform(
make_tuple_iterator(pointcloud.points_.begin(),
thrust::constant_iterator<Eigen::Vector3f>(
Eigen::Vector3f::Zero())),
make_tuple_iterator(pointcloud.points_.end(),
thrust::constant_iterator<Eigen::Vector3f>(
Eigen::Vector3f::Zero())),
make_tuple_iterator(points, colors), func);
}
draw_arrays_mode_ = GL_POINTS;
draw_arrays_size_ = GLsizei(pointcloud.points_.size());
return true;
}
size_t SimpleShaderForPointCloud::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::PointCloud &)geometry).points_.size();
}
bool SimpleShaderForLineSet::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::LineSet) {
PrintShaderWarning("Rendering type is not geometry::LineSet.");
return false;
}
glLineWidth(GLfloat(option.line_width_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleShaderForLineSet::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::LineSet) {
PrintShaderWarning("Rendering type is not geometry::LineSet.");
return false;
}
const geometry::LineSet<3> &lineset =
(const geometry::LineSet<3> &)geometry;
if (lineset.HasLines() == false) {
PrintShaderWarning("Binding failed with empty geometry::LineSet.");
return false;
}
utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>>
line_coords(lineset.lines_.size());
line_coordinates_functor<3> func_line(
thrust::raw_pointer_cast(lineset.points_.data()));
thrust::transform(lineset.lines_.begin(), lineset.lines_.end(),
line_coords.begin(), func_line);
copy_lineset_functor func_cp(
thrust::raw_pointer_cast(line_coords.data()),
thrust::raw_pointer_cast(lineset.colors_.data()),
lineset.HasColors());
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(lineset.lines_.size() * 2),
make_tuple_iterator(points, colors), func_cp);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(lineset.lines_.size() * 2);
return true;
}
size_t SimpleShaderForLineSet::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::LineSet<3> &)geometry).lines_.size() * 2;
}
template <int Dim>
bool SimpleShaderForGraphNode<Dim>::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
PrintShaderWarning("Rendering type is not geometry::Graph.");
return false;
}
glPointSize(GLfloat(option.point_size_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
template <int Dim>
bool SimpleShaderForGraphNode<Dim>::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
PrintShaderWarning("Rendering type is not geometry::Graph.");
return false;
}
const geometry::Graph<Dim> &graph = (const geometry::Graph<Dim> &)geometry;
if (graph.HasPoints() == false) {
PrintShaderWarning("Binding failed with empty graph.");
return false;
}
copy_pointcloud_functor<Dim> func(graph.HasColors(), option.point_color_option_,
view);
if (graph.HasNodeColors()) {
thrust::transform(make_tuple_begin(graph.points_, graph.node_colors_),
make_tuple_end(graph.points_, graph.node_colors_),
make_tuple_iterator(points, colors), func);
} else {
thrust::transform(
make_tuple_iterator(graph.points_.begin(),
thrust::constant_iterator<Eigen::Vector3f>(
Eigen::Vector3f::Ones())),
make_tuple_iterator(graph.points_.end(),
thrust::constant_iterator<Eigen::Vector3f>(
Eigen::Vector3f::Ones())),
make_tuple_iterator(points, colors), func);
}
draw_arrays_mode_ = GL_POINTS;
draw_arrays_size_ = GLsizei(graph.points_.size());
return true;
}
template <int Dim>
size_t SimpleShaderForGraphNode<Dim>::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::Graph<Dim> &)geometry).points_.size();
}
template class SimpleShaderForGraphNode<2>;
template class SimpleShaderForGraphNode<3>;
template <int Dim>
bool SimpleShaderForGraphEdge<Dim>::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
PrintShaderWarning("Rendering type is not geometry::Graph.");
return false;
}
glLineWidth(GLfloat(option.line_width_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
template <int Dim>
bool SimpleShaderForGraphEdge<Dim>::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::Graph) {
PrintShaderWarning("Rendering type is not geometry::Graph.");
return false;
}
const geometry::Graph<Dim> &graph = (const geometry::Graph<Dim> &)geometry;
if (graph.HasLines() == false) {
PrintShaderWarning("Binding failed with empty geometry::Graph.");
return false;
}
utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>>
line_coords(graph.lines_.size());
line_coordinates_functor<Dim> func_line(
thrust::raw_pointer_cast(graph.points_.data()));
thrust::transform(graph.lines_.begin(), graph.lines_.end(),
line_coords.begin(), func_line);
copy_lineset_functor func_cp(thrust::raw_pointer_cast(line_coords.data()),
thrust::raw_pointer_cast(graph.colors_.data()),
graph.HasColors());
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(graph.lines_.size() * 2),
make_tuple_iterator(points, colors), func_cp);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(graph.lines_.size() * 2);
return true;
}
template <int Dim>
size_t SimpleShaderForGraphEdge<Dim>::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::Graph<Dim> &)geometry).lines_.size() * 2;
}
template class SimpleShaderForGraphEdge<2>;
template class SimpleShaderForGraphEdge<3>;
bool SimpleShaderForAxisAlignedBoundingBox::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::AxisAlignedBoundingBox) {
PrintShaderWarning(
"Rendering type is not geometry::AxisAlignedBoundingBox.");
return false;
}
glLineWidth(GLfloat(option.line_width_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleShaderForAxisAlignedBoundingBox::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::AxisAlignedBoundingBox) {
PrintShaderWarning(
"Rendering type is not geometry::AxisAlignedBoundingBox.");
return false;
}
auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox(
(const geometry::AxisAlignedBoundingBox<3> &)geometry);
utility::device_vector<thrust::pair<Eigen::Vector3f, Eigen::Vector3f>>
line_coords(lineset->lines_.size());
line_coordinates_functor<3> func_line(
thrust::raw_pointer_cast(lineset->points_.data()));
thrust::transform(lineset->lines_.begin(), lineset->lines_.end(),
line_coords.begin(), func_line);
copy_lineset_functor func_cp(
thrust::raw_pointer_cast(line_coords.data()),
thrust::raw_pointer_cast(lineset->colors_.data()),
lineset->HasColors());
thrust::transform(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(lineset->lines_.size() * 2),
make_tuple_iterator(points, colors), func_cp);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(lineset->lines_.size() * 2);
return true;
}
size_t SimpleShaderForAxisAlignedBoundingBox::GetDataSize(
const geometry::Geometry &geometry) const {
auto lineset = geometry::LineSet<3>::CreateFromAxisAlignedBoundingBox(
(const geometry::AxisAlignedBoundingBox<3> &)geometry);
return lineset->lines_.size() * 2;
}
bool SimpleShaderForTriangleMesh::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
if (option.mesh_show_back_face_) {
glDisable(GL_CULL_FACE);
} else {
glEnable(GL_CULL_FACE);
}
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
if (option.mesh_show_wireframe_) {
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(1.0, 1.0);
} else {
glDisable(GL_POLYGON_OFFSET_FILL);
}
return true;
}
bool SimpleShaderForTriangleMesh::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
const geometry::TriangleMesh &mesh =
(const geometry::TriangleMesh &)geometry;
if (mesh.HasTriangles() == false) {
PrintShaderWarning("Binding failed with empty triangle mesh.");
return false;
}
copy_trianglemesh_functor func(
thrust::raw_pointer_cast(mesh.vertices_.data()),
(int *)(thrust::raw_pointer_cast(mesh.triangles_.data())),
thrust::raw_pointer_cast(mesh.vertex_colors_.data()),
mesh.HasVertexColors(), option.mesh_color_option_,
option.default_mesh_color_, view);
thrust::transform(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(mesh.triangles_.size() * 3),
make_tuple_iterator(points, colors), func);
draw_arrays_mode_ = GL_TRIANGLES;
draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3);
return true;
}
size_t SimpleShaderForTriangleMesh::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3;
}
bool SimpleShaderForVoxelGridLine::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::VoxelGrid) {
PrintShaderWarning("Rendering type is not geometry::VoxelGrid.");
return false;
}
glDisable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
return true;
}
bool SimpleShaderForVoxelGridLine::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::VoxelGrid) {
PrintShaderWarning("Rendering type is not geometry::VoxelGrid.");
return false;
}
const geometry::VoxelGrid &voxel_grid =
(const geometry::VoxelGrid &)geometry;
if (voxel_grid.HasVoxels() == false) {
PrintShaderWarning("Binding failed with empty voxel grid.");
return false;
}
utility::device_vector<Eigen::Vector3f> vertices(
voxel_grid.voxels_values_.size() * 8);
thrust::tiled_range<
thrust::counting_iterator<size_t>>
irange(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(8),
voxel_grid.voxels_values_.size());
auto gfunc = geometry::get_grid_index_functor<geometry::Voxel, Eigen::Vector3i>();
auto begin = thrust::make_transform_iterator(voxel_grid.voxels_values_.begin(), gfunc);
thrust::repeated_range<decltype(begin)>
vrange(begin, thrust::make_transform_iterator(voxel_grid.voxels_values_.end(), gfunc), 8);
geometry::compute_voxel_vertices_functor<Eigen::Vector3i> func1(voxel_grid.origin_, voxel_grid.voxel_size_);
thrust::transform(make_tuple_begin(irange, vrange), make_tuple_end(irange, vrange),
vertices.begin(), func1);
size_t n_out = voxel_grid.voxels_values_.size() * 12 * 2;
copy_voxelgrid_line_functor func2(
thrust::raw_pointer_cast(vertices.data()),
thrust::raw_pointer_cast(voxel_grid.voxels_values_.data()),
voxel_grid.HasColors(), option.mesh_color_option_,
option.default_mesh_color_, view);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_out),
make_tuple_iterator(points, colors), func2);
draw_arrays_mode_ = GL_LINES;
draw_arrays_size_ = GLsizei(n_out);
return true;
}
size_t SimpleShaderForVoxelGridLine::GetDataSize(
const geometry::Geometry &geometry) const {
return ((const geometry::VoxelGrid &)geometry).voxels_values_.size() * 12 *
2;
}
bool SimpleShaderForDistanceTransform::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::DistanceTransform) {
PrintShaderWarning("Rendering type is not geometry::DistanceTransform.");
return false;
}
glPointSize(GLfloat(option.point_size_));
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
return true;
}
bool SimpleShaderForDistanceTransform::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector4f> &colors) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::DistanceTransform) {
PrintShaderWarning("Rendering type is not geometry::DistanceTransform.");
return false;
}
const geometry::DistanceTransform &dist_trans =
(const geometry::DistanceTransform &)geometry;
if (dist_trans.IsEmpty()) {
PrintShaderWarning("Binding failed with empty distance transform.");
return false;
}
size_t n_out = dist_trans.voxels_.size();
copy_distance_voxel_functor
func(dist_trans.voxel_size_, dist_trans.resolution_, dist_trans.origin_,
dist_trans.voxel_size_ * dist_trans.resolution_ * 0.1);
thrust::transform(make_tuple_iterator(thrust::make_counting_iterator<size_t>(0), dist_trans.voxels_.begin()),
make_tuple_iterator(thrust::make_counting_iterator(n_out), dist_trans.voxels_.end()),
make_tuple_iterator(points, colors), func);
auto tp_begin = make_tuple_iterator(points, colors);
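    // sort by descending alpha so the more opaque voxels are drawn first
    // (blending is enabled in PrepareRendering above)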
thrust::sort(utility::exec_policy(0)->on(0),
tp_begin, tp_begin + n_out, alpha_greater_functor());
draw_arrays_mode_ = GL_POINTS;
draw_arrays_size_ = GLsizei(n_out);
return true;
}
size_t SimpleShaderForDistanceTransform::GetDataSize(
const geometry::Geometry &geometry) const {
int res = ((const geometry::DistanceTransform &)geometry).resolution_;
return res * res * res;
}
|
the_stack
|
#include <iostream>
#include <cstdint>
#include "include/gossip.cuh"
#include "include/hpc_helpers/include/cuda_helpers.cuh"
#include "include/hpc_helpers/include/timers.cuh"
using gpu_id_t = gossip::gpu_id_t;
using namespace helpers;
#define BIG_CONSTANT(x) (x##LLU)
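// 64-bit finalizer from MurmurHash3 (fmix64), used here as a cheap, well-mixed
// scrambler for generating and partitioning test data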
__host__ __device__ uint64_t fmix64(uint64_t k) {
k ^= k >> 33;
k *= BIG_CONSTANT(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
template <typename data_t>
__global__
void memset_kernel(data_t * data, size_t capacity, const data_t value)
{
for (size_t thid = blockDim.x*blockIdx.x+threadIdx.x; thid < capacity; thid += blockDim.x*gridDim.x)
{
data[thid] = value;
}
}
template<
typename data_t>
void memset_all(
gossip::context_t& context,
std::vector<data_t *>& data,
const std::vector<size_t>& lengths,
const data_t init_data = 0
) {
gpu_id_t num_gpus = context.get_num_devices();
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu){
cudaSetDevice(context.get_device_id(gpu));
memset_kernel<<<256, 1024, 0, context.get_streams(gpu)[0]>>>
(data[gpu], lengths[gpu], init_data);
}
}
template <
typename value_t,
typename index_t> __global__
void generate_data(
value_t * data,
index_t const length,
index_t const offset
) {
const uint64_t thid = blockDim.x*blockIdx.x+threadIdx.x;
for (uint64_t i = thid; i < length; i += blockDim.x*gridDim.x) {
data[i] = fmix64(offset+i+1);
}
}
template<
typename data_t>
void generate_all(
gossip::context_t& context,
std::vector<data_t *>& data,
const std::vector<size_t>& lengths
) {
gpu_id_t num_gpus = context.get_num_devices();
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu){
cudaSetDevice(context.get_device_id(gpu));
generate_data<<<256, 1024, 0, context.get_streams(gpu)[0]>>>
(data[gpu], lengths[gpu], gpu*lengths[gpu]);
}
}
template <
typename value_t,
typename index_t,
typename gpu_id_t,
typename funct_t> __global__
void validate(
value_t const * const data,
index_t const length,
gpu_id_t const device_id,
funct_t const predicate) {
const uint64_t thid = blockDim.x*blockIdx.x+threadIdx.x;
for (uint64_t i = thid; i < length; i += blockDim.x*gridDim.x)
if(predicate(data[i]) != device_id)
printf("ERROR on gpu %lu at index %lu: %lu with predicate %lu \n",
uint64_t(device_id-1), i, uint64_t(data[i]), predicate(data[i]));
}
void print_partition_table(const std::vector<std::vector<size_t>>& table) {
std::cout << "\nPartition Table:" << std::endl;
for (gpu_id_t src = 0; src < table.size(); src++) {
for (gpu_id_t trg = 0; trg < table[src].size(); trg++)
std::cout << table[src][trg] << ' ';
std::cout << '\n';
}
std::cout << std::endl;
}
void print_buffer_sizes(const std::vector<size_t>& bufs_lens) {
std::cout << "Required buffer sizes:" << std::endl;
for (const auto& buf_len : bufs_lens) {
std::cout << buf_len << ' ';
}
std::cout << '\n' << std::endl;
}
template<
typename data_t,
class T1,
class T2,
class T3,
class T4>
void run_multisplit_all2all(
T1& context,
T2& all2all,
T3& multisplit,
T4& point2point,
const size_t batch_size,
const size_t batch_size_secure)
{
const gpu_id_t num_gpus = context.get_num_devices();
std::cout << "INFO: " << sizeof(data_t)*batch_size*num_gpus << " bytes (all2all)" << std::endl;
std::vector<data_t *> srcs(num_gpus);
std::vector<data_t *> dsts(num_gpus);
const std::vector<size_t> lens(num_gpus, batch_size);
const std::vector<size_t> mems_lens(num_gpus, batch_size_secure);
{
GpuTimer gtimer(context.get_streams(0)[0], "malloc_devices", context.get_device_id(0), std::cout);
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaMalloc(&srcs[gpu], sizeof(data_t)*mems_lens[gpu]); CUERR
cudaMalloc(&dsts[gpu], sizeof(data_t)*mems_lens[gpu]); CUERR
}
}
{
GpuTimer gtimer(context.get_streams(0)[0], "zero_gpu_buffers", context.get_device_id(0), std::cout);
memset_all(context, srcs, mems_lens, data_t(0));
memset_all(context, dsts, mems_lens, data_t(0));
context.sync_all_streams();
CUERR
}
// generate batch of data on each device
{
GpuTimer gtimer(context.get_streams(0)[0], "init_data", context.get_device_id(0), std::cout);
generate_all(context, srcs, lens);
context.sync_all_streams();
CUERR
}
// perform multisplit on each device
auto part_hash = [=] DEVICEQUALIFIER (const data_t& x){
return (x % num_gpus);
};
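    // every key x is routed to GPU (x % num_gpus); the same predicate is reused
    // below to validate what each GPU received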
std::vector<std::vector<size_t>> table(num_gpus, std::vector<size_t>(num_gpus));
{
GpuTimer gtimer(context.get_streams(0)[0], "multisplit", context.get_device_id(0), std::cout);
multisplit.execAsync(srcs, lens, dsts, lens, table, part_hash);
multisplit.sync();
}
print_partition_table(table);
// prepare all2all --------------------------------------------------------
srcs.swap(dsts);
// reset dsts and buffer to zero
memset_all(context, dsts, mems_lens, data_t(0));
context.sync_all_streams();
CUERR
// all2all.show_plan();
{
GpuTimer gtimer(context.get_streams(0)[0], "all2all", context.get_device_id(0), std::cout);
all2all.execAsync(srcs, mems_lens, dsts, mems_lens, table);
all2all.sync();
}
{
GpuTimer gtimer(context.get_streams(0)[0], "validate", context.get_device_id(0), std::cout);
std::vector<size_t> lengths(num_gpus);
for (gpu_id_t trg = 0; trg < num_gpus; trg++) {
lengths[trg] = 0;
for (gpu_id_t src = 0; src < num_gpus; src++)
lengths[trg] += table[src][trg];
}
for (gpu_id_t gpu = 0; gpu < num_gpus; gpu++) {
cudaSetDevice(context.get_device_id(gpu));
validate<<<256, 1024, 0, context.get_streams(gpu)[0]>>>
(dsts[gpu], lengths[gpu], gpu, part_hash);
}
CUERR
}
// cleanup ----------------------------------------------------------------
context.sync_hard();
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaFree(srcs[gpu]); CUERR
cudaFree(dsts[gpu]); CUERR
}
}
template<
typename data_t,
class T1,
class T2,
class T3,
class T4>
void run_multisplit_all2all_async(
T1& context,
T2& all2all,
T3& multisplit,
T4& point2point,
const size_t batch_size,
const size_t batch_size_secure)
{
const gpu_id_t num_gpus = context.get_num_devices();
std::cout << "INFO: " << sizeof(data_t)*batch_size*num_gpus << " bytes (all2all_async)" << std::endl;
std::vector<data_t *> srcs(num_gpus);
std::vector<data_t *> dsts(num_gpus);
const std::vector<size_t> lens(num_gpus, batch_size);
const std::vector<size_t> mems_lens(num_gpus, batch_size_secure);
{
GpuTimer gtimer(context.get_streams(0)[0], "malloc_devices", context.get_device_id(0), std::cout);
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaMalloc(&srcs[gpu], sizeof(data_t)*mems_lens[gpu]); CUERR
cudaMalloc(&dsts[gpu], sizeof(data_t)*mems_lens[gpu]); CUERR
}
}
{
GpuTimer gtimer(context.get_streams(0)[0], "zero_gpu_buffers", context.get_device_id(0), std::cout);
memset_all(context, srcs, mems_lens, data_t(0));
memset_all(context, dsts, mems_lens, data_t(0));
context.sync_all_streams();
CUERR
}
// generate batch of data on each device
{
GpuTimer gtimer(context.get_streams(0)[0], "init_data", context.get_device_id(0), std::cout);
generate_all(context, srcs, lens);
context.sync_all_streams();
CUERR
}
// perform multisplit on each device
auto part_hash = [=] DEVICEQUALIFIER (const data_t& x){
return (x % num_gpus);
};
std::vector<std::vector<size_t>> table(num_gpus, std::vector<size_t>(num_gpus));
{
GpuTimer gtimer(context.get_streams(0)[0], "multisplit", context.get_device_id(0), std::cout);
multisplit.execAsync(srcs, lens, dsts, lens, table, part_hash);
multisplit.sync();
}
print_partition_table(table);
// prepare all2all --------------------------------------------------------
srcs.swap(dsts);
std::vector<size_t> bufs_lens = all2all.calcBufferLengths(table);
print_buffer_sizes(bufs_lens);
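    // the async all2all stages transfers through per-GPU auxiliary buffers;
    // calcBufferLengths reports how large each buffer must be for this partition table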
std::vector<data_t *> bufs(num_gpus);
{
GpuTimer gtimer(context.get_streams(0)[0], "malloc_buffers", context.get_device_id(0), std::cout);
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaMalloc(&bufs[gpu], sizeof(data_t)*bufs_lens[gpu]); CUERR
}
}
// reset dsts and buffer to zero
{
GpuTimer gtimer(context.get_streams(0)[0], "reset_buffers", context.get_device_id(0), std::cout);
memset_all(context, dsts, mems_lens, data_t(0));
memset_all(context, bufs, bufs_lens, data_t(0));
context.sync_all_streams();
CUERR
}
// all2all.show_plan();
{
GpuTimer gtimer(context.get_streams(0)[0], "all2all_async", context.get_device_id(0), std::cout);
all2all.execAsync(srcs, mems_lens, dsts, mems_lens, bufs, bufs_lens, table);
all2all.sync();
}
{
GpuTimer gtimer(context.get_streams(0)[0], "validate", context.get_device_id(0), std::cout);
std::vector<size_t> lengths(num_gpus, 0);
for (gpu_id_t trg = 0; trg < num_gpus; trg++) {
for (gpu_id_t src = 0; src < num_gpus; src++)
lengths[trg] += table[src][trg];
}
for (gpu_id_t gpu = 0; gpu < num_gpus; gpu++) {
cudaSetDevice(context.get_device_id(gpu));
validate<<<256, 1024, 0, context.get_streams(gpu)[0]>>>
(dsts[gpu], lengths[gpu], gpu, part_hash);
}
CUERR
}
// cleanup ----------------------------------------------------------------
context.sync_hard();
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaFree(srcs[gpu]); CUERR
cudaFree(dsts[gpu]); CUERR
cudaFree(bufs[gpu]); CUERR
}
}
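// Hedged usage sketch: context, all2all, multisplit and point2point are template
// parameters, so any concrete types that expose the member functions used above
// (get_num_devices, get_device_id, get_streams, execAsync, sync,
// calcBufferLengths, ...) will work. The type and variable names below are
// hypothetical placeholders, not definitions from this file:
//
//   my_context_t     context(device_ids);   // owns the devices and their streams
//   my_multisplit_t  multisplit(context);
//   my_all2all_t     all2all(context);
//   my_point2point_t point2point(context);
//   run_multisplit_all2all_async<uint64_t>(context, all2all, multisplit,
//                                          point2point, batch_size,
//                                          batch_size_secure);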
template<
typename data_t,
class T1,
class T2,
class T3,
class T4,
class T5>
void run_multisplit_scatter_gather(
T1& context,
T2& point2point,
T3& multisplit,
T4& scatter,
T5& gather,
gpu_id_t main_gpu,
const size_t batch_size,
const size_t batch_size_secure)
{
const gpu_id_t num_gpus = context.get_num_devices();
std::cout << "INFO: " << sizeof(data_t)*batch_size << " bytes (scatter_gather)" << std::endl;
std::vector<data_t *> srcs(num_gpus);
std::vector<data_t *> dsts(num_gpus);
std::vector<size_t > lens(num_gpus, 0);
lens[main_gpu] = batch_size;
const std::vector<size_t> mems_lens(num_gpus, batch_size_secure);
{
GpuTimer gtimer(context.get_streams(0)[0], "malloc_devices", context.get_device_id(0), std::cout);
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaMalloc(&srcs[gpu], sizeof(data_t)*mems_lens[gpu]); CUERR
cudaMalloc(&dsts[gpu], sizeof(data_t)*mems_lens[gpu]); CUERR
}
}
{
GpuTimer gtimer(context.get_streams(0)[0], "zero_gpu_buffers", context.get_device_id(0), std::cout);
memset_all(context, srcs, mems_lens, data_t(0));
memset_all(context, dsts, mems_lens, data_t(0));
context.sync_all_streams();
CUERR
}
{
GpuTimer gtimer(context.get_streams(0)[0], "init_data", context.get_device_id(0), std::cout);
cudaSetDevice(context.get_device_id(main_gpu));
generate_data<<<256, 1024, 0, context.get_streams(main_gpu)[0]>>>
(srcs[main_gpu], lens[main_gpu], size_t(0));
context.sync_all_streams();
CUERR
}
// perform multisplit on main device
auto part_hash = [=] HOSTDEVICEQUALIFIER (const data_t& x){
return (x % num_gpus);
};
std::vector<std::vector<size_t>> table(num_gpus, std::vector<size_t>(num_gpus));
{
GpuTimer gtimer(context.get_streams(0)[0], "multisplit", context.get_device_id(0), std::cout);
multisplit.execAsync(srcs, lens, dsts, lens, table, part_hash);
multisplit.sync();
}
print_partition_table(table);
// prepare scatter --------------------------------------------------------
srcs.swap(dsts);
std::vector<size_t> bufs_lens_scatter = scatter.calcBufferLengths(table[main_gpu]);
print_buffer_sizes(bufs_lens_scatter);
std::vector<data_t *> bufs(num_gpus);
std::vector<size_t> bufs_lens(bufs_lens_scatter);
{
GpuTimer gtimer(context.get_streams(0)[0], "malloc_buffers", context.get_device_id(0), std::cout);
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaMalloc(&bufs[gpu], sizeof(data_t)*bufs_lens[gpu]); CUERR
}
}
// reset dsts and buffer to zero
{
GpuTimer gtimer(context.get_streams(0)[0], "reset_buffers", context.get_device_id(0), std::cout);
memset_all(context, dsts, mems_lens, data_t(0));
memset_all(context, bufs, bufs_lens, data_t(0));
context.sync_all_streams();
CUERR
}
// scatter.show_plan();
{
GpuTimer gtimer(context.get_streams(0)[0], "scatter", context.get_device_id(0), std::cout);
scatter.execAsync(srcs[main_gpu], mems_lens[main_gpu],
dsts, mems_lens,
bufs, bufs_lens,
table[main_gpu]);
scatter.sync();
CUERR
}
{
GpuTimer gtimer(context.get_streams(0)[0], "validate_scatter", context.get_device_id(0), std::cout);
for (gpu_id_t gpu = 0; gpu < num_gpus; gpu++) {
cudaSetDevice(context.get_device_id(gpu));
validate<<<256, 1024, 0, context.get_streams(gpu)[0]>>>
(dsts[gpu], table[main_gpu][gpu], gpu, part_hash);
}
context.sync_all_streams();
CUERR
}
std::cout << '\n';
// prepare gather ---------------------------------------------------------
srcs.swap(dsts);
std::vector<size_t> bufs_lens_gather = gather.calcBufferLengths(table[main_gpu]);
print_buffer_sizes(bufs_lens_gather);
{
GpuTimer gtimer(context.get_streams(0)[0], "realloc_buffers", context.get_device_id(0), std::cout);
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
if(bufs_lens[gpu] < bufs_lens_gather[gpu]) {
bufs_lens[gpu] = bufs_lens_gather[gpu];
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaFree(bufs[gpu]); CUERR
cudaMalloc(&bufs[gpu], sizeof(data_t)*bufs_lens[gpu]); CUERR
}
}
}
// reset dsts and buffer to zero
{
GpuTimer gtimer(context.get_streams(0)[0], "reset_buffers_again", context.get_device_id(0), std::cout);
memset_all(context, dsts, mems_lens, data_t(0));
memset_all(context, bufs, bufs_lens, data_t(0));
context.sync_all_streams();
CUERR
}
// gather.show_plan();
{
GpuTimer gtimer(context.get_streams(0)[0], "gather", context.get_device_id(0), std::cout);
gather.execAsync(srcs, mems_lens,
dsts[main_gpu], mems_lens[main_gpu],
bufs, bufs_lens,
table[main_gpu]);
gather.sync();
CUERR
}
{
GpuTimer gtimer(context.get_streams(0)[0], "validate_gather", context.get_device_id(0), std::cout);
std::vector<data_t *> mems2(num_gpus, dsts[main_gpu]);
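// mems2[g] will point to the start of the segment inside dsts[main_gpu] that was
// gathered back from GPU g, i.e. the base pointer plus an exclusive prefix sum of
// table[main_gpu] (mems2[0] == dsts[main_gpu]).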
for (gpu_id_t trg = 1; trg < num_gpus; trg++) {
mems2[trg] = mems2[trg-1] + table[main_gpu][trg-1];
}
cudaSetDevice(context.get_device_id(main_gpu));
for (gpu_id_t gpu = 0; gpu < num_gpus; gpu++) {
validate<<<256, 1024, 0, context.get_streams(main_gpu)[0]>>>
(mems2[gpu], table[main_gpu][gpu], gpu, part_hash);
}
context.sync_all_streams();
CUERR
}
// cleanup ----------------------------------------------------------------
context.sync_hard();
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaFree(srcs[gpu]); CUERR
cudaFree(dsts[gpu]); CUERR
cudaFree(bufs[gpu]); CUERR
}
}
template<
typename data_t,
class T1,
class T2,
class T3,
class T4>
void run_multisplit_broadcast(
T1& context,
T2& point2point,
T3& multisplit,
T4& broadcast,
const size_t batch_size,
const size_t batch_size_secure)
{
const gpu_id_t num_gpus = context.get_num_devices();
const gpu_id_t main_gpu = 0;
std::cout << "INFO: " << sizeof(data_t)*batch_size << " bytes (broadcast)" << std::endl;
std::vector<data_t *> srcs(num_gpus);
std::vector<data_t *> dsts(num_gpus);
std::vector<size_t > lens(num_gpus, 0);
lens[main_gpu] = batch_size;
const std::vector<size_t> mems_lens(num_gpus, batch_size_secure);
{
GpuTimer gtimer(context.get_streams(0)[0], "malloc_devices", context.get_device_id(0), std::cout);
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaMalloc(&srcs[gpu], sizeof(data_t)*mems_lens[gpu]); CUERR
cudaMalloc(&dsts[gpu], sizeof(data_t)*mems_lens[gpu]); CUERR
}
}
{
GpuTimer gtimer(context.get_streams(0)[0], "zero_gpu_buffers", context.get_device_id(0), std::cout);
memset_all(context, srcs, mems_lens, data_t(0));
memset_all(context, dsts, mems_lens, data_t(0));
context.sync_all_streams();
CUERR
}
// generate batch of data on main device
{
GpuTimer gtimer(context.get_streams(0)[0], "init_data", context.get_device_id(0), std::cout);
cudaSetDevice(context.get_device_id(main_gpu));
generate_data<<<256, 1024, 0, context.get_streams(main_gpu)[0]>>>
(srcs[main_gpu], lens[main_gpu], size_t(0));
context.sync_all_streams();
CUERR
}
// perform multisplit on main device
auto part_hash = [=] HOSTDEVICEQUALIFIER (const data_t& x){
return (x % num_gpus);
};
std::vector<std::vector<size_t>> table(num_gpus, std::vector<size_t>(num_gpus));
{
GpuTimer gtimer(context.get_streams(0)[0], "multisplit", context.get_device_id(0), std::cout);
multisplit.execAsync(srcs, lens, dsts, lens, table, part_hash);
multisplit.sync();
}
print_partition_table(table);
// prepare broadcast ------------------------------------------------------
srcs.swap(dsts);
// reset dsts to zero
memset_all(context, dsts, mems_lens, data_t(0));
context.sync_all_streams();
CUERR
// broadcast.show_plan();
size_t total = 0;
for(auto& t : table[main_gpu])
total += t;
{
GpuTimer gtimer(context.get_streams(0)[0], "broadcast", context.get_device_id(0), std::cout);
broadcast.execAsync(srcs[main_gpu], mems_lens[main_gpu], total, dsts, mems_lens);
broadcast.sync();
}
std::vector<size_t> prefix(num_gpus+1);
for (gpu_id_t part = 0; part < num_gpus; part++) {
prefix[part+1] = prefix[part] + table[main_gpu][part];
}
{
GpuTimer gtimer(context.get_streams(0)[0], "validate_broadcast", context.get_device_id(0), std::cout);
for (gpu_id_t gpu = 0; gpu < num_gpus; gpu++) {
cudaSetDevice(context.get_device_id(gpu));
for (gpu_id_t part = 0; part < num_gpus; part++)
validate<<<256, 1024, 0, context.get_streams(gpu)[part]>>>
(dsts[gpu]+prefix[part], table[main_gpu][part], part, part_hash);
}
context.sync_all_streams();
CUERR
}
std::cout << '\n';
// cleanup ----------------------------------------------------------------
context.sync_hard();
for (gpu_id_t gpu = 0; gpu < num_gpus; ++gpu) {
cudaSetDevice(context.get_device_id(gpu)); CUERR
cudaFree(srcs[gpu]); CUERR
cudaFree(dsts[gpu]); CUERR
}
}
/******************************************************************************
* CTA-processing functionality for radix sort upsweep reduction kernels
******************************************************************************/
#pragma once
#include "../../radix_sort/sort_utils.cuh"
#include "../../util/basic_utils.cuh"
#include "../../util/device_intrinsics.cuh"
#include "../../util/io/load_tile.cuh"
#include "../../util/reduction/serial_reduce.cuh"
#include "../../util/ns_umbrella.cuh"
B40C_NS_PREFIX
namespace b40c {
namespace radix_sort {
namespace upsweep {
/**
* Radix sort upsweep reduction CTA
*/
template <
typename KernelPolicy,
typename SizeT,
typename KeyType>
struct Cta
{
//---------------------------------------------------------------------
// Type definitions and constants
//---------------------------------------------------------------------
typedef typename KeyTraits<KeyType>::UnsignedBits UnsignedBits;
// Integer type for digit counters (to be packed into words of PackedCounters)
typedef unsigned char DigitCounter;
// Integer type for packing DigitCounters into columns of shared memory banks
typedef typename util::If<(KernelPolicy::SMEM_CONFIG == cudaSharedMemBankSizeEightByte),
unsigned long long,
unsigned int>::Type PackedCounter;
enum {
CURRENT_BIT = KernelPolicy::CURRENT_BIT,
CURRENT_PASS = KernelPolicy::CURRENT_PASS,
RADIX_BITS = KernelPolicy::RADIX_BITS,
RADIX_DIGITS = 1 << RADIX_BITS,
// Direction of flow though ping-pong buffers: (FLOP_TURN) ? (d_keys1 --> d_keys0) : (d_keys0 --> d_keys1)
FLOP_TURN = KernelPolicy::CURRENT_PASS & 0x1,
LOG_CTA_THREADS = KernelPolicy::LOG_CTA_THREADS,
CTA_THREADS = 1 << LOG_CTA_THREADS,
LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(__CUB_CUDA_ARCH__),
WARP_THREADS = 1 << LOG_WARP_THREADS,
LOG_WARPS = LOG_CTA_THREADS - LOG_WARP_THREADS,
WARPS = 1 << LOG_WARPS,
LOG_LOAD_VEC_SIZE = KernelPolicy::LOG_LOAD_VEC_SIZE,
LOAD_VEC_SIZE = 1 << LOG_LOAD_VEC_SIZE,
LOG_LOADS_PER_TILE = KernelPolicy::LOG_LOADS_PER_TILE,
LOADS_PER_TILE = 1 << LOG_LOADS_PER_TILE,
LOG_THREAD_ELEMENTS = LOG_LOAD_VEC_SIZE + LOG_LOADS_PER_TILE,
THREAD_ELEMENTS = 1 << LOG_THREAD_ELEMENTS,
LOG_TILE_ELEMENTS = LOG_THREAD_ELEMENTS + LOG_CTA_THREADS,
TILE_ELEMENTS = 1 << LOG_TILE_ELEMENTS,
BYTES_PER_COUNTER = sizeof(DigitCounter),
LOG_BYTES_PER_COUNTER = util::Log2<BYTES_PER_COUNTER>::VALUE,
PACKING_RATIO = sizeof(PackedCounter) / sizeof(DigitCounter),
LOG_PACKING_RATIO = util::Log2<PACKING_RATIO>::VALUE,
LOG_COUNTER_LANES = CUB_MAX(0, RADIX_BITS - LOG_PACKING_RATIO),
COUNTER_LANES = 1 << LOG_COUNTER_LANES,
// To prevent counter overflow, we must periodically unpack and aggregate the
// digit counters back into registers. Each counter lane is assigned to a
// warp for aggregation.
LOG_LANES_PER_WARP = CUB_MAX(0, LOG_COUNTER_LANES - LOG_WARPS),
LANES_PER_WARP = 1 << LOG_LANES_PER_WARP,
// Unroll tiles in batches without risk of counter overflow
UNROLL_COUNT = CUB_MIN(64, 255 / THREAD_ELEMENTS),
UNROLLED_ELEMENTS = UNROLL_COUNT * TILE_ELEMENTS,
};
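// Worked example of the derived constants (illustrative policy values only):
// with RADIX_BITS = 4, a 4-byte PackedCounter (PACKING_RATIO = 4,
// LOG_PACKING_RATIO = 2) and THREAD_ELEMENTS = 4:
//   LOG_COUNTER_LANES = max(0, 4 - 2) = 2  ->  COUNTER_LANES = 4
//   UNROLL_COUNT      = min(64, 255 / 4)   =  63 full tiles between unpack steps,
// so a single unsigned char digit counter sees at most 63 * 4 = 252 <= 255
// increments before it is aggregated into the per-thread SizeT local_counts.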
/**
* Shared storage for radix distribution sorting upsweep
*/
struct SmemStorage
{
union {
unsigned char counter_base[1];
DigitCounter digit_counters[COUNTER_LANES][CTA_THREADS][PACKING_RATIO];
PackedCounter packed_counters[COUNTER_LANES][CTA_THREADS];
SizeT digit_partials[RADIX_DIGITS][WARP_THREADS + 1];
};
};
//---------------------------------------------------------------------
// Thread fields
//---------------------------------------------------------------------
// Shared storage for this CTA
SmemStorage &smem_storage;
// Thread-local counters for periodically aggregating composite-counter lanes
SizeT local_counts[LANES_PER_WARP][PACKING_RATIO];
// Input and output device pointers
UnsignedBits *d_in_keys;
SizeT *d_spine;
int warp_id;
int warp_tid;
DigitCounter *base_counter;
//---------------------------------------------------------------------
// Helper structure for templated iteration
//---------------------------------------------------------------------
// Iterate
template <int COUNT, int MAX>
struct Iterate
{
enum {
HALF = (MAX / 2),
};
// BucketKeys
static __device__ __forceinline__ void BucketKeys(
Cta &cta,
UnsignedBits keys[THREAD_ELEMENTS])
{
cta.Bucket(keys[COUNT]);
// Next
Iterate<COUNT + 1, MAX>::BucketKeys(cta, keys);
}
// ProcessTiles
static __device__ __forceinline__ void ProcessTiles(Cta &cta, SizeT cta_offset)
{
// Next
Iterate<1, HALF>::ProcessTiles(cta, cta_offset);
Iterate<1, MAX - HALF>::ProcessTiles(cta, cta_offset + (HALF * TILE_ELEMENTS));
}
};
// Terminate
template <int MAX>
struct Iterate<MAX, MAX>
{
// BucketKeys
static __device__ __forceinline__ void BucketKeys(Cta &cta, UnsignedBits keys[THREAD_ELEMENTS]) {}
// ProcessTiles
static __device__ __forceinline__ void ProcessTiles(Cta &cta, SizeT cta_offset)
{
cta.ProcessFullTile(cta_offset);
}
};
//---------------------------------------------------------------------
// Methods
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ Cta(
SmemStorage &smem_storage,
SizeT *d_spine,
KeyType *d_keys0,
KeyType *d_keys1) :
smem_storage(smem_storage),
d_in_keys(reinterpret_cast<UnsignedBits*>(FLOP_TURN ? d_keys1 : d_keys0)),
d_spine(d_spine),
warp_id(threadIdx.x >> LOG_WARP_THREADS),
warp_tid(util::LaneId())
{
base_counter = smem_storage.digit_counters[warp_id][warp_tid];
}
/**
* Decode a key and increment corresponding smem digit counter
*/
__device__ __forceinline__ void Bucket(UnsignedBits key)
{
// Compute byte offset of smem counter. Add in thread column.
unsigned int byte_offset = (threadIdx.x << (LOG_PACKING_RATIO + LOG_BYTES_PER_COUNTER));
// Perform transform op
UnsignedBits converted_key = KeyTraits<KeyType>::TwiddleIn(key);
// Add in sub-counter byte_offset
byte_offset = Extract<
CURRENT_BIT,
LOG_PACKING_RATIO,
LOG_BYTES_PER_COUNTER>(
converted_key,
byte_offset);
// Add in row byte_offset
byte_offset = Extract<
CURRENT_BIT + LOG_PACKING_RATIO,
LOG_COUNTER_LANES,
LOG_CTA_THREADS + (LOG_PACKING_RATIO + LOG_BYTES_PER_COUNTER)>(
converted_key,
byte_offset);
// Increment counter
DigitCounter *counter = (DigitCounter*) (smem_storage.counter_base + byte_offset);
(*counter)++;
}
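// Addressing sketch for Bucket() (assuming Extract<BIT, LEN, SHIFT>(key, base)
// adds the LEN key bits starting at BIT, shifted left by SHIFT, onto base): the
// final byte_offset indexes the SmemStorage union as
//   digit_counters[counter_lane][threadIdx.x][sub_counter]
// where the low LOG_PACKING_RATIO bits of the radix digit select the sub-counter
// byte inside this thread's packed word and the next LOG_COUNTER_LANES digit bits
// select the counter lane (row).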
/**
* Reset composite counters
*/
__device__ __forceinline__ void ResetDigitCounters()
{
#pragma unroll
for (int LANE = 0; LANE < COUNTER_LANES; LANE++)
{
smem_storage.packed_counters[LANE][threadIdx.x] = 0;
}
}
/**
* Reset the unpacked counters in each thread
*/
__device__ __forceinline__ void ResetUnpackedCounters()
{
#pragma unroll
for (int LANE = 0; LANE < LANES_PER_WARP; LANE++)
{
#pragma unroll
for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++)
{
local_counts[LANE][UNPACKED_COUNTER] = 0;
}
}
}
/**
* Extracts and aggregates the digit counters for each counter lane
* owned by this warp
*/
__device__ __forceinline__ void UnpackDigitCounts()
{
if (warp_id < COUNTER_LANES)
{
#pragma unroll
for (int LANE = 0; LANE < LANES_PER_WARP; LANE++)
{
const int COUNTER_LANE = LANE * WARPS;
#pragma unroll
for (int PACKED_COUNTER = 0; PACKED_COUNTER < CTA_THREADS; PACKED_COUNTER += WARP_THREADS)
{
#pragma unroll
for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++)
{
const int OFFSET = (((COUNTER_LANE * CTA_THREADS) + PACKED_COUNTER) * PACKING_RATIO) + UNPACKED_COUNTER;
local_counts[LANE][UNPACKED_COUNTER] += *(base_counter + OFFSET);
}
}
}
}
}
/**
* Places unpacked counters into smem for final digit reduction
*/
__device__ __forceinline__ void ReduceUnpackedCounts()
{
// Place unpacked digit counters in shared memory
if (warp_id < COUNTER_LANES)
{
#pragma unroll
for (int LANE = 0; LANE < LANES_PER_WARP; LANE++)
{
const int COUNTER_LANE = LANE * WARPS;
int digit_row = (COUNTER_LANE + warp_id) << LOG_PACKING_RATIO;
#pragma unroll
for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++)
{
smem_storage.digit_partials[digit_row + UNPACKED_COUNTER][warp_tid]
= local_counts[LANE][UNPACKED_COUNTER];
}
}
}
__syncthreads();
// Rake-reduce and write out the bin_count reductions
if (threadIdx.x < RADIX_DIGITS)
{
SizeT bin_count = util::reduction::SerialReduce<WARP_THREADS>::Invoke(
smem_storage.digit_partials[threadIdx.x]);
int spine_bin_offset = (gridDim.x * threadIdx.x) + blockIdx.x;
util::io::ModifiedStore<KernelPolicy::STORE_MODIFIER>::St(
bin_count,
d_spine + spine_bin_offset);
}
}
/**
* Processes a single, full tile
*/
__device__ __forceinline__ void ProcessFullTile(SizeT cta_offset)
{
// Tile of keys
UnsignedBits keys[LOADS_PER_TILE][LOAD_VEC_SIZE];
// Read tile of keys
util::io::LoadTile<
LOG_LOADS_PER_TILE,
LOG_LOAD_VEC_SIZE,
CTA_THREADS,
KernelPolicy::LOAD_MODIFIER,
false>::LoadValid(
(UnsignedBits (*)[LOAD_VEC_SIZE]) keys,
d_in_keys,
cta_offset);
// Prevent bucketing from being hoisted (otherwise we don't get the desired outstanding loads)
if (LOADS_PER_TILE > 1) __syncthreads();
// Bucket tile of keys
Iterate<0, THREAD_ELEMENTS>::BucketKeys(*this, (UnsignedBits*) keys);
}
/**
* Processes a single load (may have some threads masked off)
*/
__device__ __forceinline__ void ProcessPartialTile(
SizeT cta_offset,
const SizeT &out_of_bounds)
{
// Process partial tile if necessary using single loads
cta_offset += threadIdx.x;
while (cta_offset < out_of_bounds)
{
// Load and bucket key
UnsignedBits key = d_in_keys[cta_offset];
Bucket(key);
cta_offset += CTA_THREADS;
}
}
/**
* Process work range of tiles
*/
__device__ __forceinline__ void ProcessWorkRange(
util::CtaWorkLimits<SizeT> &work_limits)
{
// Reset digit counters in smem and unpacked counters in registers
ResetDigitCounters();
ResetUnpackedCounters();
SizeT cta_offset = work_limits.offset;
// Unroll batches of full tiles
while (cta_offset + UNROLLED_ELEMENTS < work_limits.out_of_bounds)
{
Iterate<0, UNROLL_COUNT>::ProcessTiles(*this, cta_offset);
cta_offset += UNROLLED_ELEMENTS;
__syncthreads();
// Aggregate back into local_count registers to prevent overflow
UnpackDigitCounts();
__syncthreads();
// Reset composite counters in lanes
ResetDigitCounters();
}
// Unroll single full tiles
while (cta_offset < work_limits.guarded_offset)
{
ProcessFullTile(cta_offset);
cta_offset += TILE_ELEMENTS;
}
// Process partial tile if necessary
ProcessPartialTile(cta_offset, work_limits.out_of_bounds);
__syncthreads();
// Aggregate back into local_count registers
UnpackDigitCounts();
__syncthreads();
// Final raking reduction of counts by bin, output to spine.
ReduceUnpackedCounts();
}
};
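// Hedged driver sketch (kept as a comment; the exact work-distribution API lives
// in the b40c util headers and is an assumption here). An upsweep kernel would
// place SmemStorage in shared memory, construct a Cta and hand it the CTA's work
// range, roughly:
//
//   template <typename KernelPolicy, typename SizeT, typename KeyType>
//   __global__ void UpsweepKernel(
//       SizeT *d_spine, KeyType *d_keys0, KeyType *d_keys1,
//       util::CtaWorkLimits<SizeT> work_limits)
//   {
//       typedef Cta<KernelPolicy, SizeT, KeyType> CtaT;
//       __shared__ typename CtaT::SmemStorage smem_storage;
//       CtaT cta(smem_storage, d_spine, d_keys0, d_keys1);
//       cta.ProcessWorkRange(work_limits);
//   }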
} // namespace upsweep
} // namespace radix_sort
} // namespace b40c
B40C_NS_POSTFIX
__global__
void comm_empty(
real_2_t * __restrict sigma_in,
real_2_t * __restrict sigma_out,
real_2_t * __restrict hamiltonian)
{
}
__global__
void comm_init (
const real_2_t * __restrict sigma_in,
real_2_t * __restrict sigma_out,
const real_2_t * __restrict hamiltonian,
const int dim)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int sigma_id = gid * dim * dim;
// compute commutator: -i * dt/hbar * (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_2_t tmp;
tmp.x = 0.0;
tmp.y = 0.0;
for (int k = 0; k < dim; ++k) {
// z=(x,y), w=(u,v) z*w = (xu-yv, xv+yu)
tmp.x += (hamiltonian[i * dim + k].x * sigma_in[sigma_id + k * dim + j].x -
sigma_in[sigma_id + i * dim + k].x * hamiltonian[k * dim + j].x);
tmp.x -= (hamiltonian[i * dim + k].y * sigma_in[sigma_id + k * dim + j].y -
sigma_in[sigma_id + i * dim + k].y * hamiltonian[k * dim + j].y);
tmp.y += (hamiltonian[i * dim + k].x * sigma_in[sigma_id + k * dim + j].y -
sigma_in[sigma_id + i * dim + k].x * hamiltonian[k * dim + j].y);
tmp.y += (hamiltonian[i * dim + k].y * sigma_in[sigma_id + k * dim + j].x -
sigma_in[sigma_id + i * dim + k].y * hamiltonian[k * dim + j].x);
}
// multiply with -i * dt / hbar
sigma_out[sigma_id + i * dim + j].x += hdt * tmp.y;
sigma_out[sigma_id + i * dim + j].y -= hdt * tmp.x;
}
}
}
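// Worked complex arithmetic behind comm_init: with z = (x, y) and w = (u, v),
// z * w = (x*u - y*v, x*v + y*u). The loop above accumulates the commutator element
//   C_ij = sum_k ( H_ik * S_kj - S_ik * H_kj )
// into tmp = (tmp.x, tmp.y). Multiplying by -i * dt/hbar (the constant hdt,
// presumably dt/hbar) maps (C.x, C.y) to (hdt * C.y, -hdt * C.x), which is exactly
// the final update: sigma_out.x += hdt * tmp.y; sigma_out.y -= hdt * tmp.x.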
__global__
void comm_refactor(
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2,
const int dim)
{
#define sigma_real(i, j) (sigma_id + 2 * ((i) * dim + (j)))
#define sigma_imag(i, j) (sigma_id + 2 * ((i) * dim + (j)) + 1)
#define ham_real(i, j) (2 * ((i) * dim + (j)))
#define ham_imag(i, j) (2 * ((i) * dim + (j)) + 1)
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int sigma_id = gid * dim * dim * 2;
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < dim; ++k) {
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_real -= hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real += sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_imag += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
// multiply with -i dt/hbar
sigma_out[sigma_real(i, j)] += hdt * tmp_imag;
sigma_out[sigma_imag(i, j)] -= hdt * tmp_real;
}
}
}
__global__
void comm_refactor_direct_store(
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2,
const int dim)
{
#define sigma_real(i, j) (sigma_id + 2 * ((i) * dim + (j)))
#define sigma_imag(i, j) (sigma_id + 2 * ((i) * dim + (j)) + 1)
#define ham_real(i, j) (2 * ((i) * dim + (j)))
#define ham_imag(i, j) (2 * ((i) * dim + (j)) + 1)
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int sigma_id = gid * dim * dim * 2;
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
}
}
}
}
__global__
void comm_aosoa_naive(
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2,
const int dim)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * dim * dim)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < dim; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
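// Index layout of the AoSoA kernels (illustrative, using the hypothetical value
// VEC_LENGTH_AUTO = 4): matrices are grouped into packages of VEC_LENGTH_AUTO,
// and within a package the element (i, j) of all lanes is stored contiguously,
// VEC_LENGTH_AUTO real parts followed by VEC_LENGTH_AUTO imaginary parts:
//   sigma_real(i, j) = package_base + 2 * 4 * (dim * i + j) + lane
//   sigma_imag(i, j) = package_base + 2 * 4 * (dim * i + j) + 4 + lane
// so the consecutive lanes of a package access consecutive addresses.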
__global__
void comm_aosoa_naive_constants (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2,
const int dim)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_aosoa_naive_constants_perm (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
#ifdef USE_INITZERO
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
#else
real_t tmp_real = sigma_out[sigma_real(i, j)];
real_t tmp_imag = sigma_out[sigma_imag(i, j)];
#endif
tmp_imag -= ham_real_tmp * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_real_tmp * hamiltonian[ham_real(k, j)];
tmp_imag += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_real_tmp * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_imag_tmp * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
#ifdef USE_INITZERO
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
#else
sigma_out[sigma_real(i, j)] = tmp_real;
sigma_out[sigma_imag(i, j)] = tmp_imag;
#endif
}
}
}
}
__global__
void comm_aosoa_naive_direct (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2,
const int dim)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * dim * dim)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_naive_constants_direct (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_naive_constants_direct_perm (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id ((gid / VEC_LENGTH_AUTO) * VEC_LENGTH_AUTO * 2 * DIM * DIM)
#define sigma_id (gid % VEC_LENGTH_AUTO)
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + (sigma_id))
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + (sigma_id))
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
sigma_out[sigma_imag(i, j)] -= ham_real_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_real_tmp * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_real_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_imag_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2,
const int dim)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * dim * dim))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < dim; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
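// Hedged launch sketch for the 2D comm_aosoa* kernels: threadIdx.x selects the
// lane within a package and threadIdx.y the package within the block, so a
// matching configuration (hypothetical host code, not defined in this file) would
// look roughly like
//   dim3 block(VEC_LENGTH_AUTO, PACKAGES_PER_WG);
//   dim3 grid(1, num_packages / PACKAGES_PER_WG);
//   comm_aosoa<<<grid, block>>>(sigma_in, sigma_out, hamiltonian, dim);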
__global__
void comm_aosoa_constants (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_aosoa_constants_perm (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
#ifdef USE_INITZERO
real_t tmp_real = 0.0;
real_t tmp_imag = 0.0;
#else
real_t tmp_real = sigma_out[sigma_real(i, j)];
real_t tmp_imag = sigma_out[sigma_imag(i, j)];
#endif
tmp_imag -= ham_real_tmp * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_real_tmp * hamiltonian[ham_real(k, j)];
tmp_imag += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_real_tmp * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_imag_tmp * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
#ifdef USE_INITZERO
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
#else
sigma_out[sigma_real(i, j)] = tmp_real;
sigma_out[sigma_imag(i, j)] = tmp_imag;
#endif
}
}
}
}
__global__
void comm_aosoa_direct (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2,
const int dim)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * dim * dim))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (dim * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_constants_direct (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_aosoa_constants_direct_perm (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_t *__restrict sigma_in = (real_t*) sigma2_in;
real_t *__restrict sigma_out = (real_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
#define package_id ((PACKAGES_PER_WG * blockIdx.y + threadIdx.y) * (VEC_LENGTH_AUTO * 2 * DIM * DIM))
#define sigma_id threadIdx.x
#define sigma_real(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + sigma_id)
#define sigma_imag(i, j) (package_id + 2 * VEC_LENGTH_AUTO * (DIM * (i) + (j)) + VEC_LENGTH_AUTO + sigma_id)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_t ham_real_tmp = hamiltonian[ham_real(i, k)];
real_t ham_imag_tmp = hamiltonian[ham_imag(i, k)];
real_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
sigma_out[sigma_imag(i, j)] -= ham_real_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_real_tmp * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_real_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_imag_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2,
const int dim)
{
real_vec_t *__restrict sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
// index of the package to process == global thread index (gid)
#define package_id (gid * dim * dim * 2)
#define sigma_real(i, j) (package_id + 2 * (dim * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (dim * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
for (int k = 0; k < dim; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
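// The comm_manual_aosoa* variants vectorize explicitly: real_vec_t (presumably a
// small SIMD-style vector type) packs the same matrix element of several
// consecutive matrices into one value, v(...) apparently broadcasts a scalar into
// such a vector, and each thread therefore processes a whole package per loop
// iteration. The sigma indices count real_vec_t elements, which is why
// package_id shrinks to gid * dim * dim * 2 here.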
__global__
void comm_manual_aosoa_constants (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_vec_t *__restrict sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_manual_aosoa_constants_perm (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_vec_t *__restrict sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_vec_t ham_real_tmp = v(hamiltonian[ham_real(i, k)]);
real_vec_t ham_imag_tmp = v(hamiltonian[ham_imag(i, k)]);
real_vec_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_vec_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
#ifdef USE_INITZERO
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
#else
real_vec_t tmp_real = sigma_out[sigma_real(i, j)];
real_vec_t tmp_imag = sigma_out[sigma_imag(i, j)];
#endif
tmp_imag -= ham_real_tmp * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_real_tmp * hamiltonian[ham_real(k, j)];
tmp_imag += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_real_tmp * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
tmp_real += ham_imag_tmp * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
#ifdef USE_INITZERO
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
#else
sigma_out[sigma_real(i, j)] = tmp_real;
sigma_out[sigma_imag(i, j)] = tmp_imag;
#endif
}
}
}
}
__global__
void comm_manual_aosoa_constants_perm_prefetch (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_vec_t *__restrict sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
int j = 0;
//(sigma_out.get_pointer() + sigma_real(i, j)).prefetch(2 * DIM);
for (j = 0; j < DIM; ++j) {
real_vec_t tmp_real = v(0.0);
real_vec_t tmp_imag = v(0.0);
for (int k = 0; k < DIM; ++k) {
tmp_imag -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
tmp_imag += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
tmp_imag += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_imag -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
tmp_real -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
tmp_real += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
tmp_real -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
sigma_out[sigma_real(i, j)] += tmp_real;
sigma_out[sigma_imag(i, j)] += tmp_imag;
}
}
}
__global__
void comm_manual_aosoa_direct (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2,
const int dim)
{
real_vec_t *__restrict sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * dim * dim * 2)
#define sigma_real(i, j) (package_id + 2 * (dim * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (dim * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * dim + (j))
#define ham_imag(i, j) (dim * dim + (i) * dim + (j))
for (int i = 0; i < dim; ++i) {
for (int j = 0; j < dim; ++j) {
for (int k = 0; k < dim; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa_constants_direct (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_vec_t *__restrict sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k) {
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa_constants_direct_prefetch (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_vec_t *__restrict sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
// prefetch result memory for the next inner loops
int j = 0;
//prefetch(&sigma_out[sigma_real(i, j)], 2 * DIM);
//(sigma_out.get_pointer() + sigma_real(i, j)).prefetch(2 * DIM);
for (j = 0; j < DIM; ++j) {
for (int k = 0; k < DIM; ++k)
{
sigma_out[sigma_imag(i, j)] -= hamiltonian[ham_real(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_in[sigma_real(i, k)] * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_real(i, k)] * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_real(i, k)] * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += hamiltonian[ham_imag(i, k)] * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_in[sigma_imag(i, k)] * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void comm_manual_aosoa_constants_direct_perm (
const real_2_t * __restrict sigma2_in,
real_2_t * __restrict sigma2_out,
const real_2_t * __restrict hamiltonian2)
{
real_vec_t *__restrict sigma_in = (real_vec_t*) sigma2_in;
real_vec_t *__restrict sigma_out = (real_vec_t*) sigma2_out;
real_t *__restrict hamiltonian = (real_t*) hamiltonian2;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
#define package_id (gid * DIM * DIM * 2)
#define sigma_real(i, j) (package_id + 2 * (DIM * (i) + (j)))
#define sigma_imag(i, j) (package_id + 2 * (DIM * (i) + (j)) + 1)
#define ham_real(i, j) ((i) * DIM + (j))
#define ham_imag(i, j) (DIM * DIM + (i) * DIM + (j))
// compute commutator: (hamiltonian * sigma_in[sigma_id] - sigma_in[sigma_id] * hamiltonian)
for (int i = 0; i < DIM; ++i) {
for (int k = 0; k < DIM; ++k) {
real_vec_t ham_real_tmp = v(hamiltonian[ham_real(i, k)]);
real_vec_t ham_imag_tmp = v(hamiltonian[ham_imag(i, k)]);
real_vec_t sigma_real_tmp = sigma_in[sigma_real(i, k)];
real_vec_t sigma_imag_tmp = sigma_in[sigma_imag(i, k)];
for (int j = 0; j < DIM; ++j) {
sigma_out[sigma_imag(i, j)] -= ham_real_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_imag(i, j)] += sigma_real_tmp * hamiltonian[ham_real(k, j)];
sigma_out[sigma_imag(i, j)] += ham_imag_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_imag(i, j)] -= sigma_imag_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_real_tmp * sigma_in[sigma_imag(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_real_tmp * hamiltonian[ham_imag(k, j)];
sigma_out[sigma_real(i, j)] += ham_imag_tmp * sigma_in[sigma_real(k, j)];
sigma_out[sigma_real(i, j)] -= sigma_imag_tmp * hamiltonian[ham_real(k, j)];
}
}
}
}
__global__
void final_gpu_kernel (
const real_2_t * __restrict sigma_in,
real_2_t * __restrict sigma_out,
const real_2_t * __restrict hamiltonian,
const int num)
{
#define id_2d_to_1d(i,j) ((i) * DIM + (j))
#define sigma_id(i,j,m) ((m) * DIM * DIM + ((i) * DIM + (j)))
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
// Local memory: shared between all work items in the same work group
// 2-way shared memory bank conflicts will occur for real_t = double
// real parts and imaginary parts are stored separately to avoid 4-way bank conflicts in case of real_2_t = double2
// Input sigma matrix: real part (2 matrices are processed at once)
// Input sigma matrix: imag part (2 matrices are processed at once)
__shared__ real_t ham_local_real[DIM*DIM];
__shared__ real_t ham_local_imag[DIM*DIM];
__shared__ real_t sigma_local_real[2][NUM_SUB_GROUPS][DIM*DIM];
__shared__ real_t sigma_local_imag[2][NUM_SUB_GROUPS][DIM*DIM];
// Determine matrix index (i,j) this work item is responsible for
int ij = threadIdx.x;
int i = ij / DIM; // Matrix index 'i' to be processed by this work item in any of 'start -> stop' matrices
int j = ij % DIM; // Matrix index 'j' to be processed by this work item in any of 'start -> stop' matrices
// Determine working set : Each work item participates in processing CHUNK_SIZE matrices : 'start -> stop'
int sub_group_id = threadIdx.y; // Local matrix ID within work group
int start = blockIdx.x * NUM_SUB_GROUPS * CHUNK_SIZE + sub_group_id * CHUNK_SIZE; // Global matrix ID : start
int stop = MIN(num, start + CHUNK_SIZE); // Global matrix ID : stop
// Local variables
real_2_t snew1_ij, snew2_ij;
real_2_t s1, s2;
// Load Hamiltonian into local memory: only the first sub-group participates
if (ij < (DIM * DIM) && sub_group_id == 0)
{
const real_2_t h = hamiltonian[ij];
ham_local_real[ij] = h.x;
ham_local_imag[ij] = h.y;
}
// Process all CHUNK_SIZE matrices: two matrices are processed at once (therefore increment 2)
for (int m = start; m < stop; m += 2)
{
__syncthreads();
if (ij < (DIM * DIM))
{ // Load input sigma matrix into local memory: only threads with valid IDs participate
s1 = sigma_in[sigma_id(i, j, m)]; // Real and imaginary part of matrix 'm', element (i,j)
sigma_local_real[0][sub_group_id][ij] = s1.x;
sigma_local_imag[0][sub_group_id][ij] = s1.y;
s2 = sigma_in[sigma_id(i, j, m + 1)]; // Real and imaginary part of matrix 'm+1', element (i,j)
sigma_local_real[1][sub_group_id][ij] = s2.x;
sigma_local_imag[1][sub_group_id][ij] = s2.y;
s1 = sigma_out[sigma_id(i, j, m)]; // Prefetch real and imaginary part of output sigma matrix 'm', element (i,j)
snew1_ij.x = s1.x;
snew2_ij.x = s1.y;
s2 = sigma_out[sigma_id(i, j, m + 1)]; // Prefetch real and imaginary part of output sigma matrix 'm+1', element (i,j)
snew1_ij.y = s2.x;
snew2_ij.y = s2.y;
}
__syncthreads();
if (ij < (DIM * DIM))
{
// Compute commutator: [H,sigma] = H * sigma - sigma * H <=> [H,sigma]_ij = \sum_k ( H_ik * sigma_kj - sigma_ik * H_kj )
for (int k = 0; k < DIM; ++k)
{
const int ik = id_2d_to_1d(i, k);
const int kj = id_2d_to_1d(k, j);
// Reassemble real_2_t elements from local memory: 'vector processing' gives better performance here
s1 = {sigma_local_real[0][sub_group_id][kj], sigma_local_real[1][sub_group_id][kj]};
s2 = {sigma_local_imag[0][sub_group_id][kj], sigma_local_imag[1][sub_group_id][kj]};
snew1_ij += ham_local_real[ik] * s2;
snew1_ij += ham_local_imag[ik] * s1;
snew2_ij -= ham_local_real[ik] * s1;
snew2_ij += ham_local_imag[ik] * s2;
// Reassemble real_2_t elements from local memory: 'vector processing' gives better performance here
s1 = {sigma_local_real[0][sub_group_id][ik], sigma_local_real[1][sub_group_id][ik]};
s2 = {sigma_local_imag[0][sub_group_id][ik], sigma_local_imag[1][sub_group_id][ik]};
snew1_ij -= ham_local_real[kj] * s2;
snew1_ij += ham_local_imag[kj] * s1;
snew2_ij += ham_local_real[kj] * s1;
snew2_ij -= ham_local_imag[kj] * s2;
}
// Write output sigma matrices 'm' and 'm+1', element (i,j)
sigma_out[sigma_id(i, j, m)] = {snew1_ij.x, snew2_ij.x};
sigma_out[sigma_id(i, j, m + 1)] = {snew1_ij.y, snew2_ij.y};
}
}
}
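/*
 * Minimal host-side launch sketch for final_gpu_kernel (illustrative only; it assumes device
 * pointers d_sigma_in, d_sigma_out and d_ham already exist and that DIM, NUM_SUB_GROUPS and
 * CHUNK_SIZE are the same compile-time constants used above):
 *
 *   dim3 block(DIM * DIM, NUM_SUB_GROUPS);   // threadIdx.x -> element (i,j), threadIdx.y -> sub-group
 *   int matrices_per_block = NUM_SUB_GROUPS * CHUNK_SIZE;
 *   dim3 grid((num + matrices_per_block - 1) / matrices_per_block);
 *   final_gpu_kernel<<<grid, block>>>(d_sigma_in, d_sigma_out, d_ham, num);
 *
 * Since every iteration of the m-loop consumes two matrices, num is assumed to be even here.
 */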
#include "ScanArray.h"
#include <iostream>
#include <stdio.h>
using namespace std;
#define BLOCK_SIZE 1024
// Macro: NUM_BANKS
// Number of shared-memory banks.
#define NUM_BANKS 16
// Macro: LOG_NUM_BANKS
// Base-2 logarithm of the number of banks.
#define LOG_NUM_BANKS 4
// Kernel: _scanNaiveKer (naive array scan)
// Naive scan implementation in which each thread handles one element. It runs in log(n)
// steps, performs n * (log(n) - 1) additions and needs shared memory of length 2 * n.
template < class T, class Operation >
__global__ void              // Kernel functions have no return value
_scanNaiveKer(
    T *outarray,             // Output array
    T *inarray,              // Input array
    T *blocksum,             // Intermediate result array holding the last scanned element
                             // of each block; the last block is skipped. Unused when there
                             // is only one block.
    int n,                   // Number of elements (array length)
    Operation op,            // Operation functor
    bool backward            // Whether this is a backward scan
);
// Kernel: _scanWorkEfficientKer (work-efficient array scan)
// Work-efficient scan implementation in which each thread handles two elements. The depth is
// O(log(n)) and the number of additions is O(n). The shared-memory length is n, and the
// balanced-tree algorithm is used.
// Reference 1: Blelloch, 1990, "Prefix Sums and Their Applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
// Reference 2: Prins and Chatterjee, PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
template < class T, class Operation >
__global__ void              // Kernel functions have no return value
_scanWorkEfficientKer(
    T *outarray,             // Output array
    T *inarray,              // Input array
    T *blocksum,             // Intermediate result array holding the last scanned element
                             // of each block; the last block is skipped. Unused when there
                             // is only one block.
    int n,                   // Number of elements (array length)
    Operation op,            // Operation functor
    bool backward            // Whether this is a backward scan
);
// Kernel: _scanOptKer (optimized array scan)
// Optimized scan implementation in which each thread handles two elements. The depth is
// O(log(n)) and the number of additions is O(n). Shared-memory bank conflicts are not
// avoided. The balanced-tree algorithm is used.
// Reference 1: Blelloch, 1990, "Prefix Sums and Their Applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
// Reference 2: Prins and Chatterjee, PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
template < class T, class Operation >
__global__ void              // Kernel functions have no return value
_scanOptKer(
    T *outarray,             // Output array
    T *inarray,              // Input array
    T *blocksum,             // Intermediate result array holding the last scanned element
                             // of each block; the last block is skipped. Unused when there
                             // is only one block.
    int n,                   // Number of elements (array length)
    Operation op,            // Operation functor
    bool backward            // Whether this is a backward scan
);
// Kernel: _scanBetterKer (array scan with bank-conflict avoidance)
// Optimized scan implementation in which each thread handles two elements. The depth is
// O(log(n)) and the number of additions is O(n). To avoid bank conflicts the shared-memory
// length is n + n / NUM_BANKS. The balanced-tree algorithm is used.
// Reference 1: Blelloch, 1990, "Prefix Sums and Their Applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
// Reference 2: Prins and Chatterjee, PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
template < class T, class Operation >
__global__ void              // Kernel functions have no return value
_scanBetterKer(
    T *outarray,             // Output array
    T *inarray,              // Input array
    T *blocksum,             // Intermediate result array holding the last scanned element
                             // of each block; the last block is skipped. Unused when there
                             // is only one block.
    int n,                   // Number of elements (array length)
    Operation op,            // Operation functor
    bool backward            // Whether this is a backward scan
);
// Function: scanComputeGold (CPU-side inclusive scan)
// Scans an array on the CPU, traversing all elements and applying the operation; each output
// element includes the element at its own position.
template < class T, class Operation >
__host__ int                     // Return value: NO_ERROR when the function
                                 // executes correctly.
scanComputeGold(
    T *inarray,                  // Input array
    T *reference,                // Output array
    const unsigned int len,      // Array length, i.e. number of elements to process
    Operation op,                // Operation functor
    bool backward                // Whether this is a backward scan
);
// Kernel: _addBackKer (add block sums back)
// After the initial per-block scan, adds the scanned block-sum array (built from the last
// element of each scanned segment of the original array) back onto the scanned array.
template < class T, class Operation >
__global__ void              // Kernel functions have no return value
_addBackKer(
    T *array,                // Array produced by the initial per-block scan.
    T *lastelemarray,        // Block-sum array, i.e. the last element of each scanned
                             // segment of the original array.
    int n,                   // Number of elements (array length)
    int packnum,             // Per-block processing capacity of the scan kernel, i.e. the
                             // ratio of elements processed per block to the block size.
    Operation op,            // Operation functor
    bool backward            // Whether this is a backward scan
);
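// The scan kernels below only require the Operation functor to expose a binary operator()
// and an identity() member callable on both host and device. The struct below is a minimal
// sketch of such a functor, for illustration only; the real add_class / multi_class /
// max_class / min_class functors used in example() are declared in ScanArray.h and may differ.
template < class T >
struct add_op_sketch {
    // Binary operation applied by the scan.
    __host__ __device__ T operator()(const T &a, const T &b) const { return a + b; }
    // Identity element used to pad out-of-range positions.
    __host__ __device__ T identity() const { return T(0); }
};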
// Kernel: _scanNaiveKer (naive array scan)
template < class T, class Operation >
__global__ void _scanNaiveKer(T *outarray, T *inarray, T *blocksum, int n,
Operation op, bool backward)
{
// 声明共享内存。
extern __shared__ unsigned char sharedmemo[];
// 转化为模板类型的共享内存。
T *sharedmem = (T *)sharedmemo;
// 数组索引(块内索引为 threadIdx.x)。
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// 定义共享内存中计算位置的两个变量。
// 共享内存大小是两倍的输入长度,所以通过 pout 和 pin 指针来控制计算位置。
T *pout = sharedmem;
T *pin = sharedmem + blockDim.x;
// 将需要计算的值从输入加载到共享内存上。大于数组长度的位置置为 0。
if (idx < n)
pout[threadIdx.x] = inarray[idx];
else
pout[threadIdx.x] = op.identity();
// 扫描过程,通过偏移量的控制,在两倍输入长度的共享内存上进行切换计算。
// 每次循环偏移量扩大 2 倍,这样能够实现不断的层次累加。最终实现 scan 的处
// 理效果。
if (!backward) {
for (int offset = 1; offset < blockDim.x; offset *= 2) {
// 共享内存大小是两倍的输入长度,所以通过 pout 和 pin 指针交换来换位
// 置进行处理,即计算值不覆盖原值。
T *ptemp;
ptemp = pout;
pout = pin;
pin = ptemp;
__syncthreads();
// 将所有当前的 scan 计算值,从共享内存的一侧复制到另一侧
// 一侧指共享内存采用两倍的输入数组的长度,即 double buffer。
pout[threadIdx.x] = pin[threadIdx.x];
// 如果线程索引大于偏移,那么要计算当前位置和当前位置减去偏移量处的
// 加和
if (threadIdx.x >= offset)
pout[threadIdx.x] = op(pout[threadIdx.x],
pin[threadIdx.x - offset]);
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx >= n)
return;
// 将结果从共享内存写入到输出。
outarray[idx] = pout[threadIdx.x];
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的最后一个线程,将每段处理的最后一个元素写入中间结果数组。最后一
// 个线程块不进行处理。
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = pout[threadIdx.x];
}
} else {
for (int offset = 1; offset < blockDim.x; offset *= 2) {
// 共享内存大小是两倍的输入长度,所以通过 pout 和 pin 指针交换来换位
// 置进行处理,即计算值不覆盖原值。
T *ptemp;
ptemp = pout;
pout = pin;
pin = ptemp;
__syncthreads();
// 将所有当前的 scan 计算值,从共享内存的一侧复制到另一侧
// 一侧指共享内存采用两倍的输入数组的长度,即 double buffer。
pout[threadIdx.x] = pin[threadIdx.x];
// 如果线程索引加上偏移量小于块长,那么要计算当前位置和当前位置加上
// 偏移量处的加和
if (threadIdx.x + offset < blockDim.x)
pout[threadIdx.x] = op(pin[threadIdx.x],
pin[threadIdx.x + offset]);
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx >= n)
return;
// 将结果从共享内存写入到输出。
outarray[idx] = pout[threadIdx.x];
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的第一个线程,将每段处理的第一个元素写入中间结果数组。第一个线
// 程块不进行处理。
if (threadIdx.x == 0 && blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = pout[threadIdx.x];
}
}
}
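// Worked example of the forward branch (op = +, one block of 4 threads), input [1 2 3 4]:
//   offset 1: [1 3 5 7]     (each element adds its left neighbour)
//   offset 2: [1 3 6 10]    (each element adds the value two positions to its left)
// which is the inclusive prefix sum; the backward branch mirrors the same steps from the
// right and yields the inclusive suffix sum [10 9 7 4].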
// Kernel: _scanWorkEfficientKer (work-efficient array scan)
template < class T, class Operation >
__global__ void _scanWorkEfficientKer(T *outarray, T *inarray,
T *blocksum, int n, Operation op,
bool backward)
{
// 声明共享内存。
extern __shared__ unsigned char sharedmemo[];
// 转化为模板类型的共享内存。
T *sharedmem = (T *)sharedmemo;
// 定义块内索引。
int baseidx = 2 * blockIdx.x * blockDim.x;
int inidx = 2 * threadIdx.x;
int idx = baseidx + inidx;
// 定义偏移量 offset。
int offset = 1;
// 定义核函数每块的处理数组长度。
int length = blockDim.x * 2;
// 将需要计算的值从输入加载到共享内存上。每个线程处理两个元素(相邻元素)
sharedmem[inidx] = (idx < n) ? inarray[idx] : op.identity();
sharedmem[inidx + 1] = (idx + 1 < n) ? inarray[idx + 1] : op.identity();
// scan 的累加过程,自底向上进行累加操作。
if (!backward) {
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * (inidx + 1) - 1;
int bi = offset * (inidx + 2) - 1;
// 累加。通过这样的过程使得最终最后一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[bi] = op(sharedmem[bi], sharedmem[ai]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除最后一个位置上的值。
if (threadIdx.x == 0) {
// 根据运算类型不同,最后一个位置赋为不同的单位元。
sharedmem[length - 1] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset >>= 1;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * (inidx + 1) - 1;
int bi = offset * (inidx + 2) - 1;
// 将 ai 位置处的值拷贝出来加到 bi 处,ai 的新值为 bi 原值。
T t = sharedmem[ai];
sharedmem[ai] = sharedmem[bi];
sharedmem[bi] = op(sharedmem[bi], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx >= n)
return;
// 将结果从共享内存写入到输出。
outarray[idx] = sharedmem[inidx + 1];
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx + 1 >= n)
return;
// 判断是否为当前块处理数组的最后一个元素
if ((inidx + 1) == (2 * blockDim.x - 1))
// 是最后一个元素的话,需要与对应输入数组位置上的元素进行运算
outarray[idx + 1] = op(sharedmem[inidx + 1], inarray[idx + 1]);
else
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[idx + 1] = sharedmem[inidx + 2];
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的最后一个线程,将每段处理的最后一个元素写入中间结果数组。最后一
// 个线程块不进行处理。
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = outarray[idx + 1];
}
} else {
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * inidx;
int bi = offset * (inidx + 1);
// 累加。通过这样的过程使得最终第一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[ai] = op(sharedmem[bi], sharedmem[ai]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除第一个位置上的值。
if (threadIdx.x == 0) {
// 根据运算类型不同,第一个位置赋为不同的单位元。
sharedmem[0] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset >>= 1;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * inidx;
int bi = offset * (inidx + 1);
// 将 bi 位置处的值拷贝出来加到 ai 处,bi 的新值为 ai 原值。
T t = sharedmem[bi];
sharedmem[bi] = sharedmem[ai];
sharedmem[ai] = op(sharedmem[ai], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx >= n)
return;
// 判断是否为当前块处理数组的第一个元素
if (inidx == 0)
// 是第一个元素的话,需要与对应输入数组位置上的元素进行运算
outarray[idx] = op(sharedmem[inidx], inarray[idx]);
else
// 将结果从共享内存写入到输出。
outarray[idx] = sharedmem[inidx - 1];
// 超出数组长度 n 的值不进行写入,直接返回。
if (idx + 1 < n) {
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[idx + 1] = sharedmem[inidx];
}
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的第一个线程,将每段处理的第一个元素写入中间结果数组。第一个线程
// 块不进行处理。
if (threadIdx.x == 0 &&
blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = outarray[idx];
}
}
}
// Kernel: _scanOptKer (optimized array scan)
template < class T, class Operation >
__global__ void _scanOptKer(T *outarray, T *inarray,
T *blocksum, int n, Operation op, bool backward)
{
// 声明共享内存。
extern __shared__ unsigned char sharedmemo[];
// 转化为模板类型的共享内存。
T *sharedmem = (T *)sharedmemo;
// 定义块内索引。
int baseidx = 2 * blockIdx.x * blockDim.x;
// 定义核函数每块的处理数组长度。
int length = blockDim.x * 2;
// 定义计算位置的索引(块内)。
int ai = threadIdx.x;
int bi = threadIdx.x + blockDim.x;
// 定义数组索引(块外)。
int aindex = baseidx + ai;
int bindex = baseidx + bi;
// 将需要计算的值从输入加载到共享内存上。每个线程处理两个元素。
sharedmem[ai] = (aindex < n) ? inarray[aindex] : op.identity();
sharedmem[bi] = (bindex < n) ? inarray[bindex] : op.identity();
// 定义偏移值 offset。
int offset = 1;
if (!backward) {
// scan 的累加过程,自底向上进行累加操作。
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * (2 * threadIdx.x + 1) - 1;
int bi = offset * (2 * threadIdx.x + 2) - 1;
// 累加。通过这样的过程使得最终最后一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[bi] = op(sharedmem[bi], sharedmem[ai]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除最后一个位置上的值。
if (threadIdx.x == 0) {
int index = length - 1;
// 根据运算符类型,数组最后一个位置设为不同的单位元
sharedmem[index] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset /= 2;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * (2 * threadIdx.x + 1) - 1;
int bi = offset * (2 * threadIdx.x + 2) - 1;
// 将 ai 位置处的值拷贝出来加到 bi 处,ai 的新值为 bi 原值。
T t = sharedmem[ai];
sharedmem[ai] = sharedmem[bi];
sharedmem[bi] = op(sharedmem[bi], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (aindex >= n)
return;
// 将结果从共享内存写入到输出。
outarray[aindex] = sharedmem[ai + 1];
// 超出数组长度 n 的值不进行写入,直接返回。
if (bindex >= n)
return;
// 判断是否为当前块处理数组的最后一个元素
if (bi == (2 * blockDim.x - 1))
// 是最后一个元素的话,需要与对应输入数组位置上的元素进行运算
outarray[bindex] = op(sharedmem[bi], inarray[bindex]);
else
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[bindex] = sharedmem[bi + 1];
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的最后一个线程,将每段处理的最后一个元素写入中间结果数组。最后一个线
// 程块不进行处理。
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = outarray[bindex];
}
} else {
// scan 的累加过程,自底向上进行累加操作。
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * 2 * threadIdx.x;
int bi = offset * (2 * threadIdx.x + 1);
// 累加。通过这样的过程使得最终第一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[ai] = op(sharedmem[bi], sharedmem[ai]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除第一个位置上的值。
if (threadIdx.x == 0) {
int index = 0;
// 根据运算符类型,数组第一个位置设为不同的单位元
sharedmem[index] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset /= 2;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ai = offset * 2 * threadIdx.x;
int bi = offset * (2 * threadIdx.x + 1);
// 将 bi 位置处的值拷贝出来加到 ai 处,bi 的新值为 ai 原值。
T t = sharedmem[bi];
sharedmem[bi] = sharedmem[ai];
sharedmem[ai] = op(sharedmem[ai], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (aindex >= n)
return;
// 判断是否为当前块处理数组的第一个元素
if (ai == 0)
// 是第一个元素的话,需要与对应输入数组位置上的元素进行运算
outarray[aindex] = op(sharedmem[ai], inarray[aindex]);
else
// 将结果从共享内存写入到输出。
outarray[aindex] = sharedmem[ai - 1];
// 超出数组长度 n 的值不进行写入,直接返回。
if (bindex < n) {
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[bindex] = sharedmem[bi - 1];
}
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的第一个线程,将每段处理的第一个元素写入中间结果数组。第一个线
// 程块不进行处理。
if (threadIdx.x == 0 &&
blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = outarray[aindex];
}
}
}
// Macro: CONFLICT_FREE_OFFSET
// Defined to avoid bank conflicts more strictly, even at the lower levels of the tree.
#ifdef ZERO_BANK_CONFLICTS
# define CONFLICT_FREE_OFFSET(index) \
(((index) >> LOG_NUM_BANKS) + ((index) >> (2 * LOG_NUM_BANKS)))
#else
# define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
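// Worked example with NUM_BANKS = 16 (LOG_NUM_BANKS = 4) and ZERO_BANK_CONFLICTS undefined:
//   CONFLICT_FREE_OFFSET(15) == 0, CONFLICT_FREE_OFFSET(16) == 1, CONFLICT_FREE_OFFSET(47) == 2
// i.e. one padding slot is inserted after every 16 shared-memory entries, which is why the
// host code below allocates an extra 'blocksize * packnum / NUM_BANKS' elements of shared
// memory for this kernel.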
// Kernel: _scanBetterKer (array scan with bank-conflict avoidance)
template < class T, class Operation >
__global__ void _scanBetterKer(T *outarray, T *inarray,
T *blocksum, int n, Operation op, bool backward)
{
// 声明共享内存。
extern __shared__ unsigned char sharedmemo[];
// 转化为模板类型的共享内存。
T *sharedmem = (T *)sharedmemo;
// 本地变量,基索引。
int baseidx = 2 * blockIdx.x * blockDim.x;
int idx = threadIdx.x + blockDim.x;
// 定义核函数每块的处理数组长度。
int length = blockDim.x * 2;
// 定义计算位置的索引(块内,加上 bankOffset)。
int ai = threadIdx.x + CONFLICT_FREE_OFFSET(threadIdx.x);
int bi = idx + CONFLICT_FREE_OFFSET(idx);
// 定义数组索引(块外)。
int aindex = baseidx + threadIdx.x;
int bindex = aindex + blockDim.x;
// 将需要计算的值从输入加载到共享内存上。每个线程处理两个元素。
sharedmem[ai] = (aindex < n) ? inarray[aindex] : op.identity();
sharedmem[bi] = (bindex < n) ? inarray[bindex] : op.identity();
// 定义偏移值 offset。
int offset = 1;
if (!backward) {
// scan 的累加过程,自底向上进行累加操作。
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ci = offset * (2 * threadIdx.x + 1) - 1;
int di = offset * (2 * threadIdx.x + 2) - 1;
// 避免 bank conflicts,修改计算位置索引。
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// 累加。通过这样的过程使得最终最后一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[di] = op(sharedmem[di], sharedmem[ci]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除最后一个位置上的值。
if (threadIdx.x == 0) {
int index = length - 1;
// 避免 bank conflicts,重新计算索引。
index += CONFLICT_FREE_OFFSET(index);
// 根据运算符类型,数组最后一个位置设为不同的单位元。
sharedmem[index] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset /= 2;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ci = offset * (2 * threadIdx.x + 1) - 1;
int di = offset * (2 * threadIdx.x + 2) - 1;
// 避免 bank conflicts,重新计算索引。
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// 将 ai 位置处的值拷贝出来加到 bi 处,ai 的新值为 bi 原值。
T t = sharedmem[ci];
sharedmem[ci] = sharedmem[di];
sharedmem[di] = op(sharedmem[di], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (aindex >= n)
return;
// 将结果从共享内存写入到输出。
outarray[aindex] = op(sharedmem[ai], inarray[aindex]);
// 超出数组长度 n 的值不进行写入,直接返回。
if (bindex >= n)
return;
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[bindex] = op(sharedmem[bi], inarray[bindex]);
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的最后一个线程,将每段处理的最后一个元素写入中间结果数组。最后一个线
// 程块不进行处理。
if (threadIdx.x == blockDim.x - 1 &&
blockIdx.x < gridDim.x - 1) {
blocksum[blockIdx.x] = outarray[bindex];
}
} else {
// scan 的累加过程,自底向上进行累加操作。
for (int d = blockDim.x; d > 0; d >>= 1) {
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行累加。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ci = offset * 2 * threadIdx.x;
int di = offset * (2 * threadIdx.x + 1);
// 避免 bank conflicts,修改计算位置索引。
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// 累加。通过这样的过程使得最终第一位的值是之前所有值 scan 操
// 作的结果。
sharedmem[ci] = op(sharedmem[di], sharedmem[ci]);
}
// 偏移量每次扩大 2 倍。
offset *= 2;
}
// 配合自顶向下的回扫过程,清除第一个位置上的值。
if (threadIdx.x == 0) {
int index = 0;
// 避免 bank conflicts,重新计算索引。
index += CONFLICT_FREE_OFFSET(index);
// 根据运算符类型,数组第一个位置设为不同的单位元。
sharedmem[index] = op.identity();
}
// 自顶向下回扫,这样能够使每一位上算出需要的 scan 值。
// 回扫即把上一步的计算结果进行累加。
for (int d = 1; d < length; d *= 2) {
// 回扫过程中,每次偏移量缩小2倍。
offset /= 2;
// 进行块内同步。
__syncthreads();
// 当线程索引小于处理的范围 d,进行回扫累加的过程。
if (threadIdx.x < d) {
// 计算处理位置的索引。
int ci = offset * 2 * threadIdx.x;
int di = offset * (2 * threadIdx.x + 1);
// 避免 bank conflicts,重新计算索引。
ci += CONFLICT_FREE_OFFSET(ci);
di += CONFLICT_FREE_OFFSET(di);
// 将 di 位置处的值拷贝出来加到 ci 处,di 的新值为 ci 原值。
T t = sharedmem[di];
sharedmem[di] = sharedmem[ci];
sharedmem[ci] = op(sharedmem[ci], t);
}
}
// 进行块内同步。
__syncthreads();
// 超出数组长度 n 的值不进行写入,直接返回。
if (aindex >= n)
return;
// 处理的第一个元素,需要与对应输入数组位置上的元素进行运算
outarray[aindex] = op(sharedmem[ai], inarray[aindex]);
// 超出数组长度 n 的值不进行写入,直接返回。
if (bindex < n) {
// 每个线程处理的下一个元素,将结果从共享内存写入到输出。
outarray[bindex] = op(sharedmem[bi], inarray[bindex]);
}
// 如果中间结果数组为空,不进行处理直接返回。
if (blocksum == NULL)
return;
// 每块的第一个线程,将每段处理的第一个元素写入中间结果数组。第一个线
// 程块不进行处理。
if (threadIdx.x == 0 &&
blockIdx.x > 0) {
blocksum[blockIdx.x - 1] = outarray[baseidx];
}
}
}
// Function: scanComputeGold (CPU-side inclusive scan)
template < class T, class Operation >
__host__ int scanComputeGold(T *inarray, T *reference,
const unsigned int len, Operation op,
bool backward)
{
// 计数器变量
int i;
if (!backward) {
// 初始化第一个输出元素为 inarray[0]
reference[0] = inarray[0];
for (i = 1; i < len; ++i) {
// 前序迭代累加计算
reference[i] = op(inarray[i], reference[i-1]);
}
} else {
// 初始化最后一个输出元素为 inarray[len - 1]
reference[len - 1] = inarray[len - 1];
for (i = len - 2; i >= 0; i--) {
// 后序迭代累加计算
reference[i] = op(inarray[i], reference[i+1]);
}
}
// 处理完毕退出。
return NO_ERROR;
}
// Kernel: _addBackKer (add block sums back)
template < class T, class Operation >
__global__ void _addBackKer(T *array, T *lastelemarray, int n,
int packnum, Operation op, bool backward)
{
// 声明共享内存。用来存放中间结果小数组中的元素,也就是原数组的每段最后
// 一个或第一个元素。
__shared__ T lastelement[1];
if (!backward) {
// 用每块的第一个线程来读取每块前一块的最后一个元素,从中间结果数组中读
// 取。
if (threadIdx.x == 0)
lastelement[0] = lastelemarray[blockIdx.x];
// 计算需要进行块间累加位置索引(块外的数组索引)。
unsigned int idx = (blockIdx.x + 1) * (blockDim.x * packnum) +
threadIdx.x;
// 块内同步。
__syncthreads();
// 每个线程处理两个元素,将中间结果数组中的值加回到原数组。
for (int i = 0; i < packnum; i++) {
// 如果索引大于处理数组长度,则退出。
if (idx >= n)
break;
// 将中间结果加回。
array[idx] = op(array[idx],lastelement[0]);
// 计算每个线程处理的下一个元素的索引值。
idx += blockDim.x;
}
} else {
// 用每块的第一个线程来读取每块后一块的第一个元素,从中间结果数组中读
// 取。
if (threadIdx.x == 0) {
lastelement[0] = lastelemarray[blockIdx.x];
}
// 计算需要进行块间累加位置索引(块外的数组索引)。
unsigned int idx = blockIdx.x * (blockDim.x * packnum) + threadIdx.x;
// 块内同步。
__syncthreads();
// 每个线程处理两个元素,将中间结果数组中的值加回到原数组。
for (int i = 0; i < packnum; i++) {
// 如果索引大于处理数组长度,则退出。
if (idx >= n)
break;
// 将中间结果加回。
array[idx] = op(array[idx], lastelement[0]);
// 计算每个线程处理的下一个元素的索引值。
idx += blockDim.x;
}
}
}
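// Worked example of the two-level composition used by ScanArray::scanArray
// (forward, op = +, per-block segment length 4):
//   input:                 [ 1  2  3  4 |  5  6  7  8 |  9 10 11 12 ]
//   per-block scan:        [ 1  3  6 10 |  5 11 18 26 |  9 19 30 42 ]
//   blocksum (last element of every segment except the last):  [10, 26]
//   recursive scan of blocksum:                                [10, 36]
//   _addBackKer: block b of this kernel adds blocksum[b] to segment b + 1, giving
//                          [ 1  3  6 10 | 15 21 28 36 | 45 55 66 78 ]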
// Host member method: addBack (add block-sum results back, float version)
template< class Operation >
__host__ int ScanArray::addBack(float *array, float *lastelemarray,
int numelements, int blocksize, int packnum,
Operation op, bool backward)
{
// 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。
if (array == NULL || lastelemarray == NULL)
return NULL_POINTER;
// 检查处理的数组长度,如果小于 0 出错。
if (numelements < 0)
return INVALID_DATA;
    // Compute the grid size; the last segment does not need an add-back pass.
int gridsize = (numelements + blocksize * packnum - 1) /
(blocksize * packnum) - 1;
// 判断 gridsize 大小,如果小于 1,则不用进行加回操作。返回正确。
if (gridsize < 1)
return NO_ERROR;
// 调用 _addBackKer 核函数,将中间结果数组加回到原扫描数组。
_addBackKer<<<gridsize, blocksize>>>(array, lastelemarray,
numelements, packnum, op, backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
// 处理完毕退出。
return NO_ERROR;
}
// Host member method: addBack (add block-sum results back, int version)
template< class Operation >
__host__ int ScanArray::addBack(int *array, int *lastelemarray,
int numelements, int blocksize, int packnum,
Operation op, bool backward)
{
// 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。
if (array == NULL || lastelemarray == NULL)
return NULL_POINTER;
// 检查处理的数组长度,如果小于 0 出错。
if (numelements < 0)
return INVALID_DATA;
    // Compute the grid size; the last segment does not need an add-back pass.
int gridsize = (numelements + blocksize * packnum - 1) /
(blocksize * packnum) - 1;
// 判断 gridsize 大小,如果小于 1,则不用进行加回操作。返回正确。
if (gridsize < 1)
return NO_ERROR;
// 调用 _addBackKer 核函数,将中间结果数组加回到原扫描数组。
_addBackKer<<<gridsize, blocksize>>>(array, lastelemarray,
numelements, packnum, op, backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
// 处理完毕退出。
return NO_ERROR;
}
// Macro: SCANFREE
// Releases the memory allocated earlier when an error occurs.
#define SCANFREE do {                        \
        if (gridsize > 1)                    \
            cudaFree(blocksumDev);           \
        if (hostinarray)                     \
            cudaFree(inarrayDev);            \
        if (hostoutarray)                    \
            cudaFree(outarrayDev);           \
        if (!hostinarray)                    \
            delete [] inarrayHost;           \
        if (!hostoutarray)                   \
            delete [] outarrayHost;          \
    } while (0)
// Host member method: scanArray (array scan, float version)
template< class Operation >
__host__ int ScanArray::scanArray(float *inarray, float *outarray,
int numelements, Operation op, bool backward,
bool hostinarray, bool hostoutarray)
{
// 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。
if (inarray == NULL || outarray == NULL)
return NULL_POINTER;
// 本程序实现的 4 种方法可处理的数组长度。必须加以判断控制。
if (numelements < 0)
return INVALID_DATA;
// 局部变量,错误码。
cudaError_t cuerrcode;
int errcode;
// 局部变量。
const unsigned int memsize = sizeof (float) * numelements;
unsigned int extraspace;
// 计算共享内存的长度。
unsigned int sharedmemsize = 0;
// 定义设备端的输入输出数组指针,当输入输出指针在 Host 端时,在设备端申请对
// 应大小的数组。
float *inarrayDev = NULL;
float *outarrayDev = NULL;
// 定义主机端的输入输出数组指针,当输入输出指针在 Device 端时,在主机端申请
// 对应大小的数组。
float *inarrayHost = NULL;
float *outarrayHost = NULL;
// 这里 scan 实现只支持单个线程块的计算,这里的 gridsize 可以设置的大于 1,
// 从而让多个线程块都运行相同程序来测速。计算调用 Kernel 函数的线程块的尺寸
// 和线程块的数量。
    int gridsize = 0;    // initialised so that SCANFREE is well-defined on the CPU path
int blocksize;
// 局部变量,中间结果存放数组。长度会根据线程块大小来确定。
float *blocksumDev = NULL;
// 中间结果数组的长度。
int blocksumsize;
// scan 算法中每个线程块的计算能力。核函数每块处理长度与线程块大小的比值。
int packnum;
// 针对 CPU 端的实现类型,选择路径进行处理。
if (scanType == CPU_IN_SCAN) {
// 判断当前 inarray 数组是否存储在 Host 端。若不是,则需要在 Host 端
// 为数组申请一段空间;若该数组是在 Host 端,则直接使用。
if (!hostinarray) {
// 为输入数组在 Host 端申请内存。
            inarrayHost = new float[numelements];
// 将输入数组拷贝到主机端内存。
cuerrcode = cudaMemcpy(inarrayHost, inarray, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在主机端,则将指针传给主机端指针。
inarrayHost = inarray;
}
// 判断当前 outarray 数组是否存储在 Host 端。若不是,则需要在 Host 端
// 为数组申请一段空间;若该数组是在 Host 端,则直接使用。
if (!hostoutarray) {
// 为输出数组在 Host 端申请内存。
            outarrayHost = new float[numelements];
// 将输出数组拷贝到主机端内存。
cuerrcode = cudaMemcpy(outarrayHost, outarray, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在主机端,则将指针传给主机端指针。
outarrayHost = outarray;
}
// 调用 inclusive 版的 scan 函数
errcode = scanComputeGold<float>(inarrayHost, outarrayHost,
numelements, op, backward);
// 出错则返回错误码。
if (errcode != NO_ERROR) {
// 释放内存
SCANFREE;
return errcode;
}
// 执行结束
return NO_ERROR;
}
// 判断当前 inarray 数组是否存储在 Host 端。若是,则需要在 Device 端为数组
// 申请一段空间;若该数组是在 Device端,则直接使用。
if (hostinarray) {
// 为输入数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&inarrayDev, memsize);
if (cuerrcode != cudaSuccess)
return cuerrcode;
// 将输入数组拷贝到设备端内存。
cuerrcode = cudaMemcpy(inarrayDev, inarray, memsize,
cudaMemcpyHostToDevice);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在设备端,则将指针传给对应的设备端统一指针。
inarrayDev = inarray;
}
// 判断当前 outarray 数组是否存储在 Host 端。若是,则需要在 Device 端为数组
// 申请一段空间;若该数组是在 Device端,则直接使用。
if (hostoutarray) {
// 为输出数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&outarrayDev, memsize);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在设备端,则将指针传给对应的设备端统一指针。
outarrayDev = outarray;
}
// 针对不同的实现类型,选择不同的路径进行处理。
switch(scanType) {
// 使用简单版本的 scan 实现。
case NAIVE_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 简单版本每个线程处理一个元素。
blocksize = BLOCK_SIZE;
packnum = 1;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + blocksize * packnum - 1) / blocksize);
sharedmemsize = sizeof (float) * blocksize * packnum;
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanNaiveKer<float><<<gridsize, blocksize, 2 * sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断核函数是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 使用效率版本的 scan 实现。
case EFFICIENT_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 效率版本每个线程处理两个元素。
blocksize = BLOCK_SIZE;
packnum = 2;
// 计算线程块大小和共享内存长度。
sharedmemsize = sizeof (float) * (blocksize * packnum);
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanWorkEfficientKer<float><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 使用优化版本的 scan 实现。
case OPTIMIZE_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 优化版本每个线程处理两个元素。
blocksize = BLOCK_SIZE;
packnum = 2;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
sharedmemsize = sizeof (float) * (blocksize * packnum);
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanOptKer<float><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 使用优化版本的 scan 实现。
case BETTER_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 优化版本每个线程处理两个元素。
blocksize = BLOCK_SIZE;
packnum = 2;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
        extraspace = blocksize * packnum / NUM_BANKS;
sharedmemsize = sizeof (float) * (blocksize * packnum + extraspace);
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(float));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanBetterKer<float><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 其他方式情况下,直接返回非法数据错误。
default:
if (hostinarray)
cudaFree(inarrayDev);
if (hostoutarray)
cudaFree(outarrayDev);
return INVALID_DATA;
}
    // If the grid size is greater than 1, scan the intermediate block-sum array once and
    // add the results back.
if (gridsize > 1) {
// 递归调用扫描函数。此时输入输出数组皆为中间结果数组。
// 这里的递归调用不会调用多次,数组的规模是指数倍减小的。
errcode = scanArray(blocksumDev, blocksumDev, blocksumsize, op,
backward, false, false);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
SCANFREE;
return errcode;
}
// 调用加回函数,将各块的扫描中间结果加回到输出数组。
errcode = addBack(outarrayDev, blocksumDev, numelements,
blocksize, packnum, op, backward);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
SCANFREE;
return errcode;
}
// 释放中间结果数组的设备端内存。
cudaFree(blocksumDev);
}
// 如果 outarray 在 Host 端,将结果拷贝到输出。
if (hostoutarray) {
// 将结果从设备端内存拷贝到输出数组。
cuerrcode = cudaMemcpy(outarray, outarrayDev, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess) {
if (hostinarray)
cudaFree(inarrayDev);
cudaFree(outarrayDev);
return cuerrcode;
}
}
// 释放 Device 内存。需要判断输入输出参数是否在 host 端。
if (hostinarray)
cudaFree(inarrayDev);
if (hostoutarray)
cudaFree(outarrayDev);
// 处理完毕退出。
return NO_ERROR;
}
// Host member method: scanArray (array scan, int version)
template< class Operation >
__host__ int ScanArray::scanArray(int *inarray, int *outarray,
int numelements, Operation op, bool backward,
bool hostinarray, bool hostoutarray)
{
// 检查输入和输出是否为 NULL,如果为 NULL 直接报错返回。
if (inarray == NULL || outarray == NULL)
return NULL_POINTER;
// 本程序实现的 4 种方法可处理的数组长度。必须加以判断控制。
if (numelements < 0)
return INVALID_DATA;
// 局部变量,错误码。
cudaError_t cuerrcode;
int errcode;
// 局部变量。
int memsize = sizeof (int) * numelements;
int extraspace;
// 计算共享内存的长度。
int sharedmemsize = 0;
// 定义设备端的输入输出数组指针,当输入输出指针在 Host 端时,在设备端申请对
// 应大小的数组。
int *inarrayDev = NULL;
int *outarrayDev = NULL;
// 定义主机端的输入输出数组指针,当输入输出指针在 Device 端时,在主机端申请
// 对应大小的数组。
int *inarrayHost = NULL;
int *outarrayHost = NULL;
// 这里 scan 实现只支持单个线程块的计算,这里的 gridsize 可以设置的大于 1,
// 从而让多个线程块都运行相同程序来测速。计算调用 Kernel 函数的线程块的尺寸
// 和线程块的数量。
    int gridsize = 0;    // initialised so that SCANFREE is well-defined on the CPU path
int blocksize;
// 局部变量,中间结果存放数组。长度会根据线程块大小来确定。
int *blocksumDev = NULL;
// 中间结果数组的长度。
int blocksumsize;
// scan 算法中每个线程块的计算能力。核函数每块处理长度与线程块大小的比值。
int packnum;
// 针对 CPU 端实现类型,选择路径进行处理。
if (scanType == CPU_IN_SCAN) {
// 判断当前 inarray 数组是否存储在 Host 端。若不是,则需要在 Host 端
// 为数组申请一段空间;若该数组是在 Host 端,则直接使用。
if (!hostinarray) {
// 为输入数组在 Host 端申请内存。
            inarrayHost = new int[numelements];
// 将输入数组拷贝到主机端内存。
cuerrcode = cudaMemcpy(inarrayHost, inarray, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在主机端,则将指针传给主机端指针。
inarrayHost = inarray;
}
// 判断当前 outarray 数组是否存储在 Host 端。若不是,则需要在 Host 端
// 为数组申请一段空间;若该数组是在 Host 端,则直接使用。
if (!hostoutarray) {
// 为输出数组在 Host 端申请内存。
            outarrayHost = new int[numelements];
// 将输出数组拷贝到主机端内存。
cuerrcode = cudaMemcpy(outarrayHost, outarray, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在主机端,则将指针传给主机端指针。
outarrayHost = outarray;
}
// 调用 inclusive 版的 scan 函数
errcode = scanComputeGold<int>(inarrayHost, outarrayHost,
numelements, op, backward);
// 出错则返回错误码。
if (errcode != NO_ERROR) {
// 释放内存
SCANFREE;
return errcode;
}
// 执行结束
return NO_ERROR;
}
// 判断当前 inarray 数组是否存储在 Host 端。若是,则需要在 Device 端为数组
// 申请一段空间;若该数组是在 Device端,则直接使用。
if (hostinarray) {
// 为输入数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&inarrayDev, memsize);
if (cuerrcode != cudaSuccess)
return cuerrcode;
// 将输入数组拷贝到设备端内存。
cuerrcode = cudaMemcpy(inarrayDev, inarray, memsize,
cudaMemcpyHostToDevice);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在设备端,则将指针传给对应的设备端统一指针。
inarrayDev = inarray;
}
// 判断当前 outarray 数组是否存储在 Host 端。若是,则需要在 Device 端为数组
// 申请一段空间;若该数组是在 Device端,则直接使用。
if (hostoutarray) {
// 为输出数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&outarrayDev, memsize);
if (cuerrcode != cudaSuccess)
return cuerrcode;
} else {
// 如果在设备端,则将指针传给对应的设备端统一指针。
outarrayDev = outarray;
}
// 针对不同的实现类型,选择不同的路径进行处理。
switch(scanType) {
// 使用简单版本的 scan 实现。
case NAIVE_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 简单版本每个线程处理一个元素。
blocksize = BLOCK_SIZE;
packnum = 1;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + blocksize * packnum - 1) / blocksize);
sharedmemsize = sizeof (int) * blocksize * packnum;
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanNaiveKer<int><<<gridsize, blocksize, 2 * sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断核函数是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 使用效率版本的 scan 实现。
case EFFICIENT_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 效率版本每个线程处理两个元素。
blocksize = BLOCK_SIZE;
packnum = 2;
// 计算线程块大小和共享内存长度。
sharedmemsize = sizeof (int) * (blocksize * packnum);
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanWorkEfficientKer<int><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 使用优化版本的 scan 实现。
case OPTIMIZE_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 优化版本每个线程处理两个元素。
blocksize = BLOCK_SIZE;
packnum = 2;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
sharedmemsize = sizeof (int) * (blocksize * packnum);
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanOptKer<int><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 使用优化版本的 scan 实现。
case BETTER_SCAN:
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
// 优化版本每个线程处理两个元素。
blocksize = BLOCK_SIZE;
packnum = 2;
// 计算线程块大小和共享内存长度。
gridsize = max(1, (numelements + packnum * blocksize - 1) /
(packnum * blocksize));
        extraspace = blocksize * packnum / NUM_BANKS;
sharedmemsize = sizeof (int) * (blocksize * packnum + extraspace);
// 如果扫描所需要的线程的 grid 尺寸大于 1,就需要进行加回操作,就需要申
// 请存放中间结果的数组。
if (gridsize > 1) {
// 需要将每段处理的最后一个元素取出,最后一个线程块不进行处理。
blocksumsize = gridsize - 1;
// 为存放中间结果的数组在设备端申请内存。
cuerrcode = cudaMalloc((void **)&blocksumDev,
blocksumsize * sizeof(int));
if (cuerrcode != cudaSuccess) {
cudaFree(blocksumDev);
return cuerrcode;
}
}
// 调用 Kernel 函数,完成实际的数组扫描。
// 这里需要判断输入输出指针是否在设备端。
_scanBetterKer<int><<<gridsize, blocksize, sharedmemsize>>>(
outarrayDev, inarrayDev, blocksumDev, numelements, op,
backward);
// 判断是否出错。
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
SCANFREE;
return CUDA_ERROR;
}
break;
// 其他方式情况下,直接返回非法数据错误。
default:
if (hostinarray)
cudaFree(inarrayDev);
if (hostoutarray)
cudaFree(outarrayDev);
return INVALID_DATA;
}
    // If the grid size is greater than 1, scan the intermediate block-sum array once and
    // add the results back.
if (gridsize > 1) {
// 递归调用扫描函数。此时输入输出数组皆为中间结果数组。
// 这里的递归调用不会调用多次,数组的规模是指数倍减小的。
errcode = scanArray(blocksumDev, blocksumDev, blocksumsize, op,
backward, false, false);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
SCANFREE;
return errcode;
}
// 调用加回函数,将各块的扫描中间结果加回到输出数组。
errcode = addBack(outarrayDev, blocksumDev, numelements,
blocksize, packnum, op, backward);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
SCANFREE;
return errcode;
}
// 释放中间结果数组的设备端内存。
cudaFree(blocksumDev);
}
// 如果 outarray 在 Host 端,将结果拷贝到输出。
if (hostoutarray) {
// 将结果从设备端内存拷贝到输出数组。
cuerrcode = cudaMemcpy(outarray, outarrayDev, memsize,
cudaMemcpyDeviceToHost);
if (cuerrcode != cudaSuccess) {
if (hostinarray)
cudaFree(inarrayDev);
cudaFree(outarrayDev);
return cuerrcode;
}
}
// 释放 Device 内存。需要判断输入输出参数是否在 host 端。
if (hostinarray)
cudaFree(inarrayDev);
if (hostoutarray)
cudaFree(outarrayDev);
// 处理完毕退出。
return NO_ERROR;
}
// Function: example — instantiates the templated scanArray / addBack members so that the template code links correctly.
void example()
{
// 定义 ScanArray 对象
ScanArray s;
// 数组长度
unsigned int num_elements = 1024;
    // Allocate the float input/output buffers (num_elements elements each)
    float *inarray = new float[num_elements];
    float *outarray = new float[num_elements];
    // Allocate the int input/output buffers
    int *inarray1 = new int[num_elements];
    int *outarray1 = new int[num_elements];
// 设置扫描类型为 NAIVE_SCAN
s.setScanType(NAIVE_SCAN);
// 默认输入输出数组均在 host 端
bool inhost, outhost;
inhost = true;
outhost = true;
// 新建加法和乘法运算对象
add_class<float> a;
multi_class<float> m;
max_class<float> max;
min_class<float> min;
add_class<int> a1;
multi_class<int> m1;
max_class<int> max1;
min_class<int> min1;
// 用 float 型和 int 型分别调用加法和乘法的仿函数。
s.scanArray(inarray, outarray, num_elements, a, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, a1, false, inhost, outhost);
s.scanArray(inarray, outarray, num_elements, m, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, m1, false, inhost, outhost);
s.scanArray(inarray, outarray, num_elements, max, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, max1, false, inhost, outhost);
s.scanArray(inarray, outarray, num_elements, min, false, inhost, outhost);
s.scanArray(inarray1, outarray1, num_elements, min1, false, inhost, outhost);
    // Release the host buffers allocated above.
    delete [] inarray;
    delete [] outarray;
    delete [] inarray1;
    delete [] outarray1;
}
// Undefine the macro defined above.
#undef SCANFREE
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/operator/batch_norm_layer.hpp"
//#include "caffe/layers/operator/cudnn_batch_norm_layer.hpp"
#define BN_EPS float(1e-5)
namespace caffe {
//---------------------------------- forward ---------------
static __global__ void kernel_local_stats(int num, int channels, int spatial_dim, const float norm_factor, const float* bottom_data,
float* mean, float* var)
{
// store local E[x] to mean, E[x^2] to var temporarily
__shared__ float buffer1[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer2[CAFFE_CUDA_NUM_THREADS];
const int tid = threadIdx.x;
const int c = blockIdx.x;
// load and accumulate data on each thread
buffer1[tid] = buffer2[tid] = 0;
for (int i = tid; i < num * spatial_dim; i += blockDim.x)
{
const int index = i / spatial_dim * channels * spatial_dim + c * spatial_dim + i % spatial_dim;
buffer1[tid] += bottom_data[index];
buffer2[tid] += bottom_data[index] * bottom_data[index];
}
__syncthreads();
// do tree reduction
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
buffer1[tid] += buffer1[tid + s];
buffer2[tid] += buffer2[tid + s];
}
__syncthreads();
}
// save the result back
if (tid == 0)
{
mean[c] = buffer1[0] / norm_factor;
var[c] = buffer2[0] / norm_factor;
}
}
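// Note: this kernel only produces the per-channel raw moments,
//   mean[c] = E[x_c]  and  var[c] = E[x_c^2]  (both normalised by norm_factor = num * spatial_dim).
// The actual variance Var[x_c] = E[x_c^2] - (E[x_c])^2 is formed later in Forward_gpu via
// caffe_gpu_mul / caffe_gpu_sub before var_buffer_ is used.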
static __global__ void kernel_forward( const int num, const int channels, const int spatial_dim,
const float* mean, const float* var, const float* bottom_data, float* top_data)
{
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim)
{
int c = (index / spatial_dim) % channels;
top_data[index] = (bottom_data[index] - mean[c]) / sqrt(var[c] + BN_EPS);
}
}
//------------------------ backward -------
static __global__ void kernel_backward_mean_var(const int num, const int channels, const int spatial_dim,
const float* top_diff, const float* bottom_data, const float * mean_data, const float * var_data, float* mean_diff, float* var_diff)
{
__shared__ float buffer1[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer2[CAFFE_CUDA_NUM_THREADS];
const int tid = threadIdx.x;
const int c = blockIdx.x;
// load and accumulate data on each thread
buffer1[tid] = buffer2[tid] = 0;
for (int i = tid; i < num * spatial_dim; i += blockDim.x)
{
const int index = i / spatial_dim * channels * spatial_dim + c * spatial_dim + i % spatial_dim;
buffer1[tid] += top_diff[index] / sqrt(var_data[c] + BN_EPS);
buffer2[tid] += top_diff[index] * (bottom_data[index] - mean_data[c]) / sqrt(var_data[c] + BN_EPS);
}
__syncthreads();
// do tree reduction
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
buffer1[tid] += buffer1[tid + s];
buffer2[tid] += buffer2[tid + s];
}
__syncthreads();
}
// save the result back
if (tid == 0)
{
mean_diff[c] = - buffer1[0];
var_diff[c] = - buffer2[0] / (2*(var_data[c] + BN_EPS));
}
}
static __global__ void kernel_backward_bottom_0(const int num, const int channels, const int spatial_dim, const float norm_factor, const float* top_diff,
const float* var, float* bottom_diff)
{
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim)
{
int c = (index / spatial_dim) % channels;
const float inv_std = float(1) / sqrt(var[c] + BN_EPS);
bottom_diff[index] = inv_std * top_diff[index];
}
}
static __global__ void kernel_backward_bottom_1(const int num, const int channels, const int spatial_dim, const float norm_factor, const float* top_diff,
const float* var, float* bottom_diff)
{
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim)
{
int c = (index / spatial_dim) % channels;
const float inv_std = float(1) / sqrt(var[c] + BN_EPS);
bottom_diff[index] += inv_std * top_diff[index];
}
}
static __global__ void kernel_mean_var_backward_bottom(const int num, const int channels, const int spatial_dim, const float norm_factor,
const float * mean_data, const float* var_data,const float * mean_diff, const float * var_diff,
const float* bottom_data, float* bottom_diff)
{
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim)
{
int c = (index / spatial_dim) % channels;
bottom_diff[index] += mean_diff[c] / norm_factor
+ var_diff[c] / norm_factor * float(2) * (bottom_data[index] - mean_data[c]);
}
}
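// Taken together, kernel_backward_mean_var, kernel_backward_bottom_{0,1} and
// kernel_mean_var_backward_bottom above implement the standard batch-norm input gradient
// (this layer applies no scale/bias), with N = num * spatial_dim and s = sqrt(var + BN_EPS):
//   dL/dmean = - sum_i dy_i / s
//   dL/dvar  = - sum_i dy_i * (x_i - mean) / (2 * (var + BN_EPS) * s)
//   dL/dx_i  = dy_i / s + (dL/dmean) / N + (dL/dvar) * 2 * (x_i - mean) / N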
//----------------------------------------secforward-----------------------------
//------------------------ diff ------------------
static __global__ void kernel_secforward_diff_mean_diff_var(const int num, const int channels, const int spatial_dim, const float norm_factor,
const float* bottom_sec_diff, const float* bottom_data, const float * mean_data, const float * var_data, float* mean_sec_diff, float* var_sec_diff)
{
__shared__ float buffer1[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer2[CAFFE_CUDA_NUM_THREADS];
const int tid = threadIdx.x;
const int c = blockIdx.x;
// load and accumulate data on each thread
buffer1[tid] = buffer2[tid] = 0;
for (int i = tid; i < num * spatial_dim; i += blockDim.x)
{
const int index = i / spatial_dim * channels * spatial_dim + c * spatial_dim + i % spatial_dim;
buffer1[tid] += bottom_sec_diff[index];
buffer2[tid] += bottom_sec_diff[index] * (bottom_data[index] - mean_data[c]);
}
__syncthreads();
// do tree reduction
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
buffer1[tid] += buffer1[tid + s];
buffer2[tid] += buffer2[tid + s];
}
__syncthreads();
}
// save the result back
if (tid == 0)
{
mean_sec_diff[c] = buffer1[0] / norm_factor;
var_sec_diff[c] = buffer2[0] * float(2) / norm_factor;
}
}
static __global__ void kernel_secforward_top(const int num, const int channels, const int spatial_dim, const float norm_factor, const float* bottom_sec_diff,
const float* var, float* top_sec_diff)
{
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim)
{
int c = (index / spatial_dim) % channels;
const float inv_std = float(1) / sqrt(var[c] + BN_EPS);
top_sec_diff[index] = inv_std * bottom_sec_diff[index];
}
}
static __global__ void kernel_diff_mean_diff_var_secforward_top(const int num, const int channels, const int spatial_dim, const float norm_factor,
const float * mean_data, const float* var_data,const float * mean_sec_diff, const float * var_sec_diff,
const float* bottom_data, float* top_sec_diff)
{
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim)
{
int c = (index / spatial_dim) % channels;
top_sec_diff[index] += - mean_sec_diff[c] / sqrt(var_data[c]+BN_EPS)
- var_sec_diff[c] * (bottom_data[index] - mean_data[c]) / pow(var_data[c]+BN_EPS, float(1.5)) * float(0.5);
}
}
//------------------------- data --------------
static __global__ void kernel_secforward_bottom(const int num, const int channels, const int spatial_dim, const float norm_factor,
const float * bottom_sec_diff, const float * top_diff,
const float * var_data, const float * var_diff, const float * var_sec_diff,
float * bottom_diff)
{
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim)
{
int c = (index / spatial_dim) % channels;
bottom_diff[index] = bottom_sec_diff[index]*var_diff[c]*float(2)/norm_factor
- var_sec_diff[c]*top_diff[index]/pow(var_data[c]+BN_EPS,float(1.5))*float(0.5);
}
}
static __global__ void kernel_secforward_mean_var(const int num, const int channels, const int spatial_dim, const float norm_factor,
const float * bottom_sec_diff, const float * top_diff, const float * bottom_data,
const float * mean_data, const float * mean_sec_diff, const float * var_data, const float * var_sec_diff,
float * mean_diff, float * var_diff)
{
__shared__ float buffer_secx[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer_dy[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer_secx_dy[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer_x_dy[CAFFE_CUDA_NUM_THREADS];
const int tid = threadIdx.x;
const int c = blockIdx.x;
// load and accumulate data on each thread
buffer_secx[tid] = buffer_dy[tid] = buffer_secx_dy[tid] = buffer_x_dy[tid] = 0;
for (int i = tid; i < num * spatial_dim; i += blockDim.x)
{
const int index = i / spatial_dim * channels * spatial_dim + c * spatial_dim + i % spatial_dim;
buffer_secx[tid] += bottom_sec_diff[index];
buffer_dy[tid] += top_diff[index];
buffer_secx_dy[tid] += bottom_sec_diff[index]*top_diff[index];
buffer_x_dy[tid] += (bottom_data[index] - mean_data[c])*top_diff[index];
}
__syncthreads();
// do tree reduction
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
buffer_secx[tid] += buffer_secx[tid + s];
buffer_dy[tid] += buffer_dy[tid + s];
buffer_secx_dy[tid] += buffer_secx_dy[tid + s];
buffer_x_dy[tid] += buffer_x_dy[tid + s];
}
__syncthreads();
}
// save the result back
if (tid == 0)
{
mean_diff[c] = -buffer_secx[0]*var_diff[c]*float(2)/norm_factor+var_sec_diff[c]*buffer_dy[0]/pow(var_data[c]+BN_EPS,float(1.5))*float(0.5);
var_diff[c] = -buffer_secx_dy[0]/pow(var_data[c]+BN_EPS,float(1.5))*float(0.5)
+mean_sec_diff[c]*buffer_dy[0]/pow(var_data[c]+BN_EPS,float(1.5))*float(0.5)
+var_sec_diff[c]*buffer_x_dy[0]/pow(var_data[c]+BN_EPS,float(2.5))*float(0.75);
}
}
//----------------------------------------------------
void BatchNormLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
if (Caffe::bn_state() == "frozen")
{
kernel_forward<<<CAFFE_GET_BLOCKS(bottom[0]->count()),CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width,
this->blobs_[0]->gpu_data(), this->blobs_[1]->gpu_data(),
bottom[0]->gpu_data(),
top[0]->mutable_gpu_data());
}
else
{
kernel_local_stats<<<channels, CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width,
static_cast<float>(num * height * width),
bottom[0]->gpu_data(),
mean_buffer_->mutable_gpu_data(),
var_buffer_->mutable_gpu_data());
caffe_gpu_mul(channels, mean_buffer_->gpu_data(), mean_buffer_->gpu_data(), mean_buffer_->mutable_gpu_sec_diff());
caffe_gpu_sub(channels, var_buffer_->gpu_data(), mean_buffer_->gpu_sec_diff(), var_buffer_->mutable_gpu_data());
if (Caffe::number_collect_sample == 0 && Caffe::bn_state() == "learned")
{
caffe_gpu_set(this->blobs_[0]->count(),float(0),this->blobs_[0]->mutable_gpu_data());
caffe_gpu_set(this->blobs_[1]->count(),float(0),this->blobs_[1]->mutable_gpu_data());
}
float factor;
if (Caffe::number_collect_sample == -1)
factor = 0.01;
else
factor = float(1)/float(Caffe::number_collect_sample+1);
caffe_gpu_axpby(mean_buffer_->count(),
factor, mean_buffer_->gpu_data(),
1-factor, this->blobs_[0]->mutable_gpu_data());
caffe_gpu_axpby(var_buffer_->count(),
factor, var_buffer_->gpu_data(),
1-factor, this->blobs_[1]->mutable_gpu_data());
kernel_forward<<<CAFFE_GET_BLOCKS(bottom[0]->count()),CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width,
mean_buffer_->gpu_data(), var_buffer_->gpu_data(),
bottom[0]->gpu_data(),
top[0]->mutable_gpu_data());
}
}
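// Note on the statistics update above: while samples are being collected
// (Caffe::number_collect_sample >= 0) the stored mean/var form a cumulative moving average
// with factor = 1 / (number_collect_sample + 1); once number_collect_sample == -1 they become
// an exponential moving average with factor = 0.01. In both cases
//   running = factor * batch_stat + (1 - factor) * running,
// which is exactly what caffe_gpu_axpby(count, factor, batch, 1 - factor, running) computes.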
void BatchNormLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
if (this->has_bottom_sec_diff_ == false)
{
kernel_backward_bottom_0<<<CAFFE_GET_BLOCKS(bottom[0]->count()),CAFFE_CUDA_NUM_THREADS>>>
(num, channels, height * width, static_cast<float>(num * height * width), top[0]->gpu_diff(),var_buffer_->gpu_data(),
bottom[0]->mutable_gpu_diff());
}
else
{
kernel_backward_bottom_1<<<CAFFE_GET_BLOCKS(bottom[0]->count()),CAFFE_CUDA_NUM_THREADS>>>
(num, channels, height * width, static_cast<float>(num * height * width), top[0]->gpu_diff(),var_buffer_->gpu_data(),
bottom[0]->mutable_gpu_diff());
}
kernel_backward_mean_var<<<channels, CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width,
top[0]->gpu_diff(), bottom[0]->gpu_data(),mean_buffer_->gpu_data(),var_buffer_->gpu_data(),
mean_buffer_->mutable_gpu_diff(), var_buffer_->mutable_gpu_diff());
kernel_mean_var_backward_bottom<<<CAFFE_GET_BLOCKS(bottom[0]->count()),CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width, static_cast<float>(num * height * width),
mean_buffer_->gpu_data(), var_buffer_->gpu_data(),mean_buffer_->gpu_diff(), var_buffer_->gpu_diff(),
bottom[0]->gpu_data(), bottom[0]->mutable_gpu_diff());
this->has_bottom_sec_diff_ = false;
}
void BatchNormLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
//-------------------------------------- diff---------------------------------------
kernel_secforward_diff_mean_diff_var<<<channels, CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width, static_cast<float>(num * height * width),
bottom[0]->gpu_sec_diff(), bottom[0]->gpu_data(),mean_buffer_->gpu_data(),var_buffer_->gpu_data(),
mean_buffer_->mutable_gpu_sec_diff(), var_buffer_->mutable_gpu_sec_diff());
kernel_secforward_top<<<CAFFE_GET_BLOCKS(bottom[0]->count()),CAFFE_CUDA_NUM_THREADS>>>
(num, channels, height * width, static_cast<float>(num * height * width), bottom[0]->gpu_sec_diff(),var_buffer_->gpu_data(),
top[0]->mutable_gpu_sec_diff());
kernel_diff_mean_diff_var_secforward_top<<<CAFFE_GET_BLOCKS(bottom[0]->count()),CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width, static_cast<float>(num * height * width),
mean_buffer_->gpu_data(), var_buffer_->gpu_data(), mean_buffer_->gpu_sec_diff(), var_buffer_->gpu_sec_diff(),
bottom[0]->gpu_data(), top[0]->mutable_gpu_sec_diff());
//--------------------------------------- data ----------------------------
kernel_secforward_bottom<<<CAFFE_GET_BLOCKS(bottom[0]->count()),CAFFE_CUDA_NUM_THREADS>>>
(num, channels, height * width, static_cast<float>(num * height * width),
bottom[0]->gpu_sec_diff(), top[0]->gpu_diff(),
var_buffer_->gpu_data(), var_buffer_->gpu_diff(), var_buffer_->gpu_sec_diff(),
bottom[0]->mutable_gpu_diff());
kernel_secforward_mean_var<<<channels, CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width, static_cast<float>(num * height * width),
bottom[0]->gpu_sec_diff(),top[0]->gpu_diff(),bottom[0]->gpu_data(),
mean_buffer_->gpu_data(), mean_buffer_->gpu_sec_diff(),var_buffer_->gpu_data(), var_buffer_->gpu_sec_diff(),
mean_buffer_->mutable_gpu_diff(), var_buffer_->mutable_gpu_diff());
kernel_mean_var_backward_bottom<<<CAFFE_GET_BLOCKS(bottom[0]->count()),CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width, static_cast<float>(num * height * width),
mean_buffer_->gpu_data(), var_buffer_->gpu_data(),mean_buffer_->gpu_diff(), var_buffer_->gpu_diff(),
bottom[0]->gpu_data(), bottom[0]->mutable_gpu_diff());
this->has_bottom_sec_diff_ = true;
//---------------------------------------------------------------------------
}
} // namespace caffe
/**
 * \file
 * cub::BlockReduceTiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduction.
 */
#pragma once
#include <iterator>
#include "../../block/block_load.cuh"
#include "../../block/block_reduce.cuh"
#include "../../grid/grid_mapping.cuh"
#include "../../grid/grid_queue.cuh"
#include "../../grid/grid_even_share.cuh"
#include "../../util_vector.cuh"
#include "../../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Tuning policy for BlockReduceTiles
*/
template <
int _BLOCK_THREADS, ///< Threads per thread block
int _ITEMS_PER_THREAD, ///< Items per thread per tile of input
int _VECTOR_LOAD_LENGTH, ///< Number of items per vectorized load
BlockReduceAlgorithm _BLOCK_ALGORITHM, ///< Cooperative block-wide reduction algorithm to use
PtxLoadModifier _LOAD_MODIFIER, ///< PTX load modifier
GridMappingStrategy _GRID_MAPPING> ///< How to map tiles of input onto thread blocks
struct BlockReduceTilesPolicy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS,
ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
VECTOR_LOAD_LENGTH = _VECTOR_LOAD_LENGTH,
};
static const BlockReduceAlgorithm BLOCK_ALGORITHM = _BLOCK_ALGORITHM;
static const GridMappingStrategy GRID_MAPPING = _GRID_MAPPING;
static const PtxLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER;
};
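// A minimal illustrative instantiation (not from the original source); the thread
// count, items per thread, vector width, and enumerator choices below are assumptions
// picked for readability, not tuned values.
typedef BlockReduceTilesPolicy<
    128,                        // _BLOCK_THREADS
    8,                          // _ITEMS_PER_THREAD
    4,                          // _VECTOR_LOAD_LENGTH
    BLOCK_REDUCE_RAKING,        // _BLOCK_ALGORITHM
    LOAD_DEFAULT,               // _LOAD_MODIFIER
    GRID_MAPPING_EVEN_SHARE>    // _GRID_MAPPING
    ExampleReduceTilesPolicy128;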
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief BlockReduceTiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduction.
*
* Each thread reduces only the values it loads. If \p FIRST_TILE, this
* partial reduction is stored into \p thread_aggregate. Otherwise it is
* accumulated into \p thread_aggregate.
*/
template <
typename BlockReduceTilesPolicy,
typename InputIteratorRA,
typename SizeT,
typename ReductionOp>
struct BlockReduceTiles
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
typedef typename std::iterator_traits<InputIteratorRA>::value_type T; // Type of input iterator
typedef VectorHelper<T, BlockReduceTilesPolicy::VECTOR_LOAD_LENGTH> VecHelper; // Helper type for vectorizing loads of T
typedef typename VecHelper::Type VectorT; // Vector of T
// Constants
enum
{
BLOCK_THREADS = BlockReduceTilesPolicy::BLOCK_THREADS,
ITEMS_PER_THREAD = BlockReduceTilesPolicy::ITEMS_PER_THREAD,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
VECTOR_LOAD_LENGTH = BlockReduceTilesPolicy::VECTOR_LOAD_LENGTH,
// Can vectorize according to the policy if the input iterator is a native pointer to a built-in primitive
CAN_VECTORIZE = (BlockReduceTilesPolicy::VECTOR_LOAD_LENGTH > 1) &&
(IsPointer<InputIteratorRA>::VALUE) &&
(VecHelper::BUILT_IN),
};
static const PtxLoadModifier LOAD_MODIFIER = BlockReduceTilesPolicy::LOAD_MODIFIER;
static const BlockReduceAlgorithm BLOCK_ALGORITHM = BlockReduceTilesPolicy::BLOCK_ALGORITHM;
// Parameterized BlockReduce primitive
typedef BlockReduce<T, BLOCK_THREADS, BlockReduceTilesPolicy::BLOCK_ALGORITHM> BlockReduceT;
/// Shared memory type required by this thread block
typedef typename BlockReduceT::TempStorage _TempStorage;
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
T thread_aggregate; ///< Each thread's partial reduction
_TempStorage& temp_storage; ///< Reference to temp_storage
InputIteratorRA d_in; ///< Input data to reduce
ReductionOp reduction_op; ///< Binary reduction operator
int first_tile_size; ///< Size of first tile consumed
bool input_aligned; ///< Whether or not input is vector-aligned
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ BlockReduceTiles(
TempStorage& temp_storage, ///< Reference to temp_storage
InputIteratorRA d_in, ///< Input data to reduce
ReductionOp reduction_op) ///< Binary reduction operator
:
temp_storage(temp_storage.Alias()),
d_in(d_in),
reduction_op(reduction_op),
first_tile_size(0),
input_aligned(CAN_VECTORIZE && ((size_t(d_in) & (sizeof(VectorT) - 1)) == 0))
{}
/**
* Process a single tile of input
*/
template <bool FULL_TILE>
__device__ __forceinline__ void ConsumeTile(
SizeT block_offset, ///< The offset of the tile to consume
int valid_items = TILE_ITEMS) ///< The number of valid items in the tile
{
if (FULL_TILE)
{
T stripe_partial;
// Load full tile
if (input_aligned)
{
// Alias items as an array of VectorT and load it in striped fashion
enum { WORDS = ITEMS_PER_THREAD / VECTOR_LOAD_LENGTH };
VectorT vec_items[WORDS];
// Load striped into vec items
VectorT* alias_ptr = reinterpret_cast<VectorT*>(d_in + block_offset + (threadIdx.x * VECTOR_LOAD_LENGTH));
#pragma unroll
for (int i = 0; i < WORDS; ++i)
vec_items[i] = alias_ptr[BLOCK_THREADS * i];
// Reduce items within each thread stripe
stripe_partial = ThreadReduce<ITEMS_PER_THREAD>(
reinterpret_cast<T*>(vec_items),
reduction_op);
}
else
{
T items[ITEMS_PER_THREAD];
// Load items in striped fashion
LoadStriped<LOAD_MODIFIER, BLOCK_THREADS>(threadIdx.x, d_in + block_offset, items);
// Reduce items within each thread stripe
stripe_partial = ThreadReduce(items, reduction_op);
}
// Update running thread aggregate
thread_aggregate = (first_tile_size) ?
reduction_op(thread_aggregate, stripe_partial) : // Update
stripe_partial; // Assign
}
else
{
// Partial tile
int thread_offset = threadIdx.x;
if (!first_tile_size && (thread_offset < valid_items))
{
// Assign thread_aggregate
thread_aggregate = ThreadLoad<LOAD_MODIFIER>(d_in + block_offset + thread_offset);
thread_offset += BLOCK_THREADS;
}
while (thread_offset < valid_items)
{
// Update thread aggregate
T item = ThreadLoad<LOAD_MODIFIER>(d_in + block_offset + thread_offset);
thread_aggregate = reduction_op(thread_aggregate, item);
thread_offset += BLOCK_THREADS;
}
}
// Set first tile size if necessary
if (!first_tile_size)
first_tile_size = valid_items;
}
//---------------------------------------------------------------------
// Consume a contiguous segment of tiles
//---------------------------------------------------------------------
/**
* \brief Reduce a contiguous segment of input tiles
*/
__device__ __forceinline__ void ConsumeTiles(
SizeT block_offset, ///< [in] Threadblock begin offset (inclusive)
SizeT block_oob, ///< [in] Threadblock end offset (exclusive)
T &block_aggregate) ///< [out] Running total
{
// Consume subsequent full tiles of input
while (block_offset + TILE_ITEMS <= block_oob)
{
ConsumeTile<true>(block_offset);
block_offset += TILE_ITEMS;
}
// Consume a partially-full tile
if (block_offset < block_oob)
{
int valid_items = block_oob - block_offset;
ConsumeTile<false>(block_offset, valid_items);
}
// Compute block-wide reduction
block_aggregate = (first_tile_size < TILE_ITEMS) ?
BlockReduceT(temp_storage).Reduce(thread_aggregate, reduction_op, first_tile_size) :
BlockReduceT(temp_storage).Reduce(thread_aggregate, reduction_op);
}
/**
* Reduce a contiguous segment of input tiles
*/
__device__ __forceinline__ void ConsumeTiles(
SizeT num_items, ///< [in] Total number of global input items
GridEvenShare<SizeT> &even_share, ///< [in] GridEvenShare descriptor
GridQueue<SizeT> &queue, ///< [in,out] GridQueue descriptor
T &block_aggregate, ///< [out] Running total
Int2Type<GRID_MAPPING_EVEN_SHARE> is_even_share) ///< [in] Marker type indicating this is an even-share mapping
{
// Initialize even-share descriptor for this thread block
even_share.BlockInit();
// Consume input tiles
ConsumeTiles(even_share.block_offset, even_share.block_oob, block_aggregate);
}
//---------------------------------------------------------------------
// Dynamically consume tiles
//---------------------------------------------------------------------
/**
* Dequeue and reduce tiles of items as part of an inter-block reduction
*/
__device__ __forceinline__ void ConsumeTiles(
int num_items, ///< Total number of input items
GridQueue<SizeT> queue, ///< Queue descriptor for assigning tiles of work to thread blocks
T &block_aggregate) ///< [out] Running total
{
// Shared dequeue offset
__shared__ SizeT dequeue_offset;
// We give each thread block at least one tile of input.
SizeT block_offset = blockIdx.x * TILE_ITEMS;
SizeT even_share_base = gridDim.x * TILE_ITEMS;
if (block_offset + TILE_ITEMS <= num_items)
{
// Consume full tile of input
ConsumeTile<true>(block_offset);
// Dequeue more tiles
while (true)
{
// Dequeue a tile of items
if (threadIdx.x == 0)
dequeue_offset = queue.Drain(TILE_ITEMS) + even_share_base;
__syncthreads();
// Grab tile offset and check if we're done with full tiles
block_offset = dequeue_offset;
__syncthreads();
if (block_offset + TILE_ITEMS > num_items)
break;
// Consume a full tile
ConsumeTile<true>(block_offset);
}
}
if (block_offset < num_items)
{
int valid_items = num_items - block_offset;
ConsumeTile<false>(block_offset, valid_items);
}
// Compute block-wide reduction
block_aggregate = (first_tile_size < TILE_ITEMS) ?
BlockReduceT(temp_storage).Reduce(thread_aggregate, reduction_op, first_tile_size) :
BlockReduceT(temp_storage).Reduce(thread_aggregate, reduction_op);
}
/**
* Dequeue and reduce tiles of items as part of an inter-block reduction
*/
__device__ __forceinline__ void ConsumeTiles(
SizeT num_items, ///< [in] Total number of global input items
GridEvenShare<SizeT> &even_share, ///< [in] GridEvenShare descriptor
GridQueue<SizeT> &queue, ///< [in,out] GridQueue descriptor
T &block_aggregate, ///< [out] Running total
Int2Type<GRID_MAPPING_DYNAMIC> is_dynamic) ///< [in] Marker type indicating this is a dynamic mapping
{
ConsumeTiles(num_items, queue, block_aggregate);
}
};
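/**
 * Hedged usage sketch (not part of the original header): a kernel that reduces a
 * contiguous range with the ConsumeTiles(block_offset, block_oob, aggregate)
 * interface defined above. The kernel name, the even split of the input, and the
 * one-partial-per-block output convention are illustrative assumptions; the grid is
 * assumed to be sized so that every thread block receives at least one item.
 */
template <
    typename BlockReduceTilesPolicyT,
    typename InputIteratorRA,
    typename SizeT,
    typename ReductionOp>
__global__ void ExampleReduceRangeKernel(
    InputIteratorRA d_in, ///< [in] Input data to reduce
    SizeT num_items, ///< [in] Total number of input items
    typename std::iterator_traits<InputIteratorRA>::value_type *d_block_partials, ///< [out] One partial reduction per thread block
    ReductionOp reduction_op) ///< [in] Binary reduction operator
{
    typedef BlockReduceTiles<BlockReduceTilesPolicyT, InputIteratorRA, SizeT, ReductionOp> BlockReduceTilesT;
    typedef typename std::iterator_traits<InputIteratorRA>::value_type T;
    __shared__ typename BlockReduceTilesT::TempStorage temp_storage;
    // Evenly split the input into one contiguous segment per thread block
    SizeT items_per_block = (num_items + gridDim.x - 1) / gridDim.x;
    SizeT block_offset = blockIdx.x * items_per_block;
    SizeT block_oob = (block_offset + items_per_block < num_items) ?
        block_offset + items_per_block :
        num_items;
    if (block_offset >= num_items)
        return;
    // Consume the segment tile-by-tile and compute the block-wide aggregate
    T block_aggregate;
    BlockReduceTilesT tiles(temp_storage, d_in, reduction_op);
    tiles.ConsumeTiles(block_offset, block_oob, block_aggregate);
    // The aggregate produced by the underlying BlockReduce is valid in thread 0
    if (threadIdx.x == 0)
        d_block_partials[blockIdx.x] = block_aggregate;
}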
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
#include <vector>
namespace hvvr {
struct CudaFormatDescriptor {
uint32_t r = 0, g = 0, b = 0, a = 0;
cudaChannelFormatKind channelType = cudaChannelFormatKindNone;
cudaTextureReadMode readMode = cudaReadModeElementType;
bool sRGB = false;
uint32_t elementSize = 0;
CudaFormatDescriptor() {}
CudaFormatDescriptor(uint32_t r,
uint32_t g,
uint32_t b,
uint32_t a,
cudaChannelFormatKind channelType,
cudaTextureReadMode readMode,
bool sRGB,
uint32_t elementSize)
: r(r), g(g), b(b), a(a), channelType(channelType), readMode(readMode), sRGB(sRGB), elementSize(elementSize) {}
};
static CudaFormatDescriptor formatToDescriptor(TextureFormat format) {
switch (format) {
case TextureFormat::r8g8b8a8_unorm_srgb:
return {8u, 8u, 8u, 8u, cudaChannelFormatKindUnsigned, cudaReadModeNormalizedFloat, true, 4};
case TextureFormat::r8g8b8a8_unorm:
return {8u, 8u, 8u, 8u, cudaChannelFormatKindUnsigned, cudaReadModeNormalizedFloat, false, 4};
case TextureFormat::r16g16b16a16_unorm:
return {16u, 16u, 16u, 16u, cudaChannelFormatKindUnsigned, cudaReadModeNormalizedFloat, false, 8};
case TextureFormat::r32g32b32a32_float:
return {32u, 32u, 32u, 32u, cudaChannelFormatKindFloat, cudaReadModeElementType, false, 16};
case TextureFormat::r16g16b16a16_float:
return {16u, 16u, 16u, 16u, cudaChannelFormatKindFloat, cudaReadModeElementType, false, 8};
case TextureFormat::r11g11b10_float:
return {11u, 11u, 10u, 0u, cudaChannelFormatKindFloat, cudaReadModeElementType, false, 4};
case TextureFormat::r32_float:
return {32u, 0u, 0u, 0u, cudaChannelFormatKindFloat, cudaReadModeElementType, false, 4};
default:
printf("Unhandled texture format\n");
assert(false);
}
return CudaFormatDescriptor();
}
Texture::Texture(const TextureData& textureData) {
_textureID = CreateTexture(textureData);
}
// TODO(anankervis):
Texture::~Texture() {}
cudaTextureObject_t* gDeviceTextureArray;
Texture2D gTextureAtlas[SimpleMaterial::maxTextureCount] = {};
static uint32_t gTextureCount = 0;
CUDA_DEVICE uchar4 to_uchar4(vector4 vec) {
return make_uchar4((uint8_t)vec.x, (uint8_t)vec.y, (uint8_t)vec.z, (uint8_t)vec.w);
}
CUDA_KERNEL void d_mipmap(cudaSurfaceObject_t mipOutput,
cudaTextureObject_t mipInput,
uint32_t imageW,
uint32_t imageH) {
uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;
float px = 1.0 / float(imageW);
float py = 1.0 / float(imageH);
if ((x < imageW) && (y < imageH)) {
// take the average of 4 samples
// we are using the normalized access to make sure non-power-of-two textures
// behave well when downsized.
vector4 color = vector4(tex2D<float4>(mipInput, (x + 0) * px, (y + 0) * py)) +
vector4(tex2D<float4>(mipInput, (x + 1) * px, (y + 0) * py)) +
vector4(tex2D<float4>(mipInput, (x + 1) * px, (y + 1) * py)) +
vector4(tex2D<float4>(mipInput, (x + 0) * px, (y + 1) * py));
color /= 4.0f;
color *= 255.0f;
color = min(color, 255.0f);
surf2Dwrite(to_uchar4(color), mipOutput, x * sizeof(uchar4), y);
}
}
static void generateMipMaps(cudaMipmappedArray_t mipmapArray, uint32_t width, uint32_t height) {
#ifdef SHOW_MIPMAPS
cudaArray_t levelFirst;
checkCudaErrors(cudaGetMipmappedArrayLevel(&levelFirst, mipmapArray, 0));
#endif
uint32_t level = 0;
while (width != 1 || height != 1) {
width /= 2;
width = max(uint32_t(1), width);
height /= 2;
height = max(uint32_t(1), height);
cudaArray_t levelFrom;
cutilSafeCall(cudaGetMipmappedArrayLevel(&levelFrom, mipmapArray, level));
cudaArray_t levelTo;
cutilSafeCall(cudaGetMipmappedArrayLevel(&levelTo, mipmapArray, level + 1));
cudaExtent levelToSize;
cutilSafeCall(cudaArrayGetInfo(NULL, &levelToSize, NULL, levelTo));
assert(levelToSize.width == width);
assert(levelToSize.height == height);
assert(levelToSize.depth == 0);
// generate texture object for reading
cudaTextureObject_t texInput;
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = levelFrom;
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = 1;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeClamp;
texDescr.addressMode[1] = cudaAddressModeClamp;
texDescr.addressMode[2] = cudaAddressModeClamp;
texDescr.readMode = cudaReadModeNormalizedFloat;
cutilSafeCall(cudaCreateTextureObject(&texInput, &texRes, &texDescr, NULL));
// generate surface object for writing
cudaSurfaceObject_t surfOutput;
cudaResourceDesc surfRes;
memset(&surfRes, 0, sizeof(cudaResourceDesc));
surfRes.resType = cudaResourceTypeArray;
surfRes.res.array.array = levelTo;
cutilSafeCall(cudaCreateSurfaceObject(&surfOutput, &surfRes));
// run mipmap kernel
dim3 blockSize(16, 16, 1);
dim3 gridSize((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y, 1);
d_mipmap<<<gridSize, blockSize>>>(surfOutput, texInput, width, height);
cutilSafeCall(cudaDeviceSynchronize());
cutilSafeCall(cudaGetLastError());
cutilSafeCall(cudaDestroySurfaceObject(surfOutput));
cutilSafeCall(cudaDestroyTextureObject(texInput));
#ifdef SHOW_MIPMAPS
// we blit the current mipmap back into first level
cudaMemcpy3DParms copyParams = {0};
copyParams.dstArray = levelFirst;
copyParams.srcArray = levelTo;
copyParams.extent = make_cudaExtent(width, height, 1);
copyParams.kind = cudaMemcpyDeviceToDevice;
checkCudaErrors(cudaMemcpy3D(&copyParams));
#endif
level++;
}
}
uint32_t getMipMapLevels(uint32_t width, uint32_t height, uint32_t depth) {
uint32_t sz = max(max(width, height), depth);
uint32_t levels = 0;
while (sz) {
sz /= 2;
levels++;
}
return levels;
}
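// For example, a 1920x1080 texture (depth 0) yields 11 levels: 1920 -> 960 -> ... -> 1.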
// Allocates the GPU texture resources from the host and returns the texture's index in the atlas
uint32_t CreateTexture(const TextureData& textureData) {
uint32_t depth = 0;
assert(gTextureCount < SimpleMaterial::maxTextureCount - 1); // reserve the last index for SimpleMaterial::badTextureIndex
if (gTextureCount == 0) {
cudaMalloc((void**)(&gDeviceTextureArray), sizeof(cudaTextureObject_t) * SimpleMaterial::maxTextureCount);
}
CudaFormatDescriptor desc = formatToDescriptor(textureData.format);
Texture2D tex;
tex.width = textureData.width;
tex.height = textureData.height;
tex.elementSize = desc.elementSize;
tex.hasMipMaps = true;
tex.format = textureData.format;
cudaChannelFormatDesc chanDesc = cudaCreateChannelDesc(desc.r, desc.g, desc.b, desc.a, desc.channelType);
cudaExtent extents = {textureData.width, textureData.height, depth};
uint32_t levels = 0;
if (tex.hasMipMaps) {
// how many mipmaps we need
levels = getMipMapLevels(textureData.width, textureData.height, depth);
cutilSafeCall(cudaMallocMipmappedArray(&tex.d_rawMipMappedMemory, &chanDesc, extents, levels));
// upload level 0
cutilSafeCall(cudaGetMipmappedArrayLevel(&tex.d_rawMemory, tex.d_rawMipMappedMemory, 0));
} else {
// Create buffer for cuda write
cutilSafeCall(cudaMallocArray(&tex.d_rawMemory, &chanDesc, textureData.width, textureData.height));
}
cudaTextureDesc texDesc = {};
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.addressMode[2] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = desc.readMode;
texDesc.sRGB = desc.sRGB;
texDesc.normalizedCoords = true;
texDesc.maxAnisotropy = 8;
printf("width: %u, height: %u, stride: %u, elementSize: %u\n", textureData.width, textureData.height,
textureData.strideElements, desc.elementSize);
cutilSafeCall(cudaMemcpy2DToArray(tex.d_rawMemory, 0, 0, textureData.data, textureData.strideElements * desc.elementSize,
textureData.width * desc.elementSize, textureData.height,
cudaMemcpyHostToDevice));
cudaResourceDesc resDesc = {};
if (tex.hasMipMaps) {
generateMipMaps(tex.d_rawMipMappedMemory, textureData.width, textureData.height);
resDesc.resType = cudaResourceTypeMipmappedArray;
resDesc.res.mipmap.mipmap = tex.d_rawMipMappedMemory;
texDesc.mipmapFilterMode = cudaFilterModeLinear;
texDesc.maxMipmapLevelClamp = float(levels - 1);
} else {
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = tex.d_rawMemory;
}
// Create Texture Object
cutilSafeCall(cudaCreateTextureObject(&tex.d_texObject, &resDesc, &texDesc, 0));
cutilSafeCall(cudaMemcpy(&gDeviceTextureArray[gTextureCount], &tex.d_texObject, sizeof(cudaTextureObject_t),
cudaMemcpyHostToDevice));
gTextureAtlas[gTextureCount] = tex;
++gTextureCount;
return gTextureCount - 1;
}
void DestroyAllTextures() {
for (uint32_t i = 0; i < gTextureCount; ++i) {
cutilSafeCall(cudaFreeArray(gTextureAtlas[i].d_rawMemory));
cutilSafeCall(cudaDestroyTextureObject(gTextureAtlas[i].d_texObject));
}
cutilSafeCall(cudaFree(gDeviceTextureArray));
gTextureCount = 0;
}
Texture2D createEmptyTexture(uint32_t width,
uint32_t height,
TextureFormat format,
cudaTextureAddressMode xWrapMode,
cudaTextureAddressMode yWrapMode,
bool linearFilter) {
CudaFormatDescriptor desc = formatToDescriptor(format);
Texture2D tex;
tex.width = width;
tex.height = height;
tex.elementSize = desc.elementSize;
tex.hasMipMaps = false;
tex.format = format;
cudaChannelFormatDesc chanDesc = cudaCreateChannelDesc(desc.r, desc.g, desc.b, desc.a, desc.channelType);
// Create buffer for cuda write
cutilSafeCall(cudaMallocArray(&tex.d_rawMemory, &chanDesc, width, height));
cudaTextureDesc texDesc = {};
texDesc.addressMode[0] = xWrapMode;
texDesc.addressMode[1] = yWrapMode;
texDesc.addressMode[2] = cudaAddressModeClamp;
texDesc.filterMode = linearFilter ? cudaFilterModeLinear : cudaFilterModePoint;
texDesc.readMode = desc.readMode;
texDesc.normalizedCoords = true;
texDesc.sRGB = desc.sRGB;
cudaResourceDesc resDesc = {};
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = tex.d_rawMemory;
// Create Texture Object
cutilSafeCall(cudaCreateTextureObject(&tex.d_texObject, &resDesc, &texDesc, 0));
// Create Surface Object
cutilSafeCall(cudaCreateSurfaceObject(&tex.d_surfaceObject, &resDesc));
return tex;
}
CUDA_KERNEL void ClearKernel(Texture2D tex) {
uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < tex.width*tex.elementSize && y < tex.height) {
surf2Dwrite<unsigned char>(0, tex.d_surfaceObject, x, y);
}
}
void clearTexture(Texture2D tex) {
KernelDim dim(tex.width*tex.elementSize, tex.height, 16, 8);
ClearKernel<<<dim.grid, dim.block>>>(tex);
}
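// Hedged usage sketch (not part of the original file): combining the helpers above to
// allocate a scratch render target and zero it through its surface object. The helper
// name, size choice, and format are illustrative assumptions.
inline Texture2D createClearedScratchTexture(uint32_t width, uint32_t height) {
    Texture2D scratch = createEmptyTexture(width, height,
                                           TextureFormat::r8g8b8a8_unorm,
                                           cudaAddressModeClamp, cudaAddressModeClamp,
                                           /*linearFilter*/ true);
    clearTexture(scratch); // fills every byte of the backing array with 0
    return scratch;
}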
} // namespace hvvr
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/inter_class_layer.hpp"
//#include "stdio.h"
namespace caffe {
template <typename Dtype>
__global__ void Weight_mean_gpu(int nthreads, const int K_, const int N_,
const Dtype* weight, Dtype* weight_mean) {
CUDA_KERNEL_LOOP(index, nthreads) {
for (int i = 0; i < K_; i++) {
weight_mean[i] += weight[index * K_ + i];
}
}
}
template <typename Dtype>
__global__ void Weight_mean_normA_gpu(int nthreads, Dtype* temp_mean_norm, Dtype* weight_mean) {
temp_mean_norm[0] = (Dtype)0.;
CUDA_KERNEL_LOOP(index, nthreads) {
temp_mean_norm[0] += weight_mean[index] * weight_mean[index];
}
}
template <typename Dtype>
__global__ void Weight_mean_normB_gpu(int nthreads,Dtype* temp_mean_norm, Dtype* weight_mean) {
CUDA_KERNEL_LOOP(index, nthreads) {
weight_mean[index] = weight_mean[index] / sqrt(temp_mean_norm[0] + (Dtype)1e-5);
}
}
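// Hedged CPU reference (not part of the original layer) for what the two kernels above
// compute together: weight_mean <- weight_mean / sqrt(||weight_mean||^2 + 1e-5).
// The helper name is an assumption; it only documents the intended math.
template <typename Dtype>
void weight_mean_normalize_cpu(const int K, Dtype* weight_mean) {
  Dtype sq_norm = Dtype(0);
  for (int i = 0; i < K; ++i) {
    sq_norm += weight_mean[i] * weight_mean[i];
  }
  for (int i = 0; i < K; ++i) {
    weight_mean[i] /= std::sqrt(sq_norm + Dtype(1e-5));
  }
}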
/************ Inter class type: Mean ************/
template <typename Dtype>
__global__ void InterClassMean_forward_gpu(int nthreads, const int K_, Dtype alpha_,
const Dtype* label, const Dtype * weight_mean_data, const Dtype * weight,
Dtype* inter_class_dist) {
inter_class_dist[0] = (Dtype)0.; //initialized top[0]
CUDA_KERNEL_LOOP(index, nthreads) {
const int label_value = static_cast<int>(label[index]);
Dtype cosine_dist = (Dtype)0.;
for(int i = 0; i < K_; i++){
cosine_dist += weight_mean_data[i] * weight[label_value * K_ +i];
}
inter_class_dist[0] += (Dtype)1. / (Dtype)nthreads - cosine_dist / (Dtype)nthreads;
}
//inter_class_loss = (Dtype)1. / inter_class_dist[0];
}
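// Hedged CPU reference (not part of the original layer) for the quantity accumulated by
// the kernel above: the batch average of (1 - <weight_mean, w_label>), i.e. one minus
// the cosine similarity when both vectors have unit norm. The helper name is an assumption.
template <typename Dtype>
Dtype inter_class_mean_dist_cpu(const int M, const int K, const Dtype* label,
                                const Dtype* weight_mean, const Dtype* weight) {
  Dtype dist = Dtype(0);
  for (int m = 0; m < M; ++m) {
    const int label_value = static_cast<int>(label[m]);
    Dtype cosine = Dtype(0);
    for (int k = 0; k < K; ++k) {
      cosine += weight_mean[k] * weight[label_value * K + k];
    }
    dist += (Dtype(1) - cosine) / Dtype(M);
  }
  return dist;
}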
template <typename Dtype>
__global__ void InterClassMean_not_forward_gpu(int nthreads, const int K_, Dtype alpha_,
const Dtype* label, const Dtype * weight_mean_data, const Dtype * weight,
Dtype* inter_class_dist) {
inter_class_dist[0] = (Dtype)0.; // initialized top[0]
}
template <typename Dtype>
__global__ void InterClassMean_backward_gpu(int nthreads, const int K_, Dtype alpha_,
const Dtype* label, const Dtype * weight_mean_data, Dtype * weight_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int label_value = static_cast<int>(label[index]);
for(int i = 0; i < K_; i++){
weight_diff[label_value * K_ + i] += (Dtype)1.*alpha_*weight_mean_data[i]/nthreads;
}
}
}
/****end****** Inter class type: Mean ************/
/************ Inter class type: Among ************/
template <typename Dtype>
__global__ void InterClassAmong_forward_gpu(int nthreads, const int K_, Dtype alpha_,
Dtype* weight_wise_dist_sq, Dtype* weight_wise_diff_data,
const Dtype * weight, Dtype* inter_class_dist) {
// inter_class_dist approximates a constant
// because weights have been normalized and the number of weights is large enough
inter_class_dist[0] = (Dtype)0.;
Dtype tmp = (Dtype)0.;
CUDA_KERNEL_LOOP(i, nthreads) {
for (int j = 0; j < nthreads; j++){
if(i != j){
for (int k = 0; k < K_; k++){
tmp += pow((weight[i * K_ + k] - weight[j * K_ + k]),2);
}
}
}
}
inter_class_dist[0] = tmp / (Dtype)nthreads;
weight_wise_dist_sq[0] = tmp / (Dtype)nthreads; // store the distance
//inter_class_loss = (Dtype)1. * nthreads / (tmp + (Dtype)1e-5); // minimize inter_class_loss
}
template <typename Dtype>
__global__ void InterClassAmong_batch_forward_gpu(int nthreads, const int K_, Dtype alpha_,
Dtype* weight_wise_dist_sq, Dtype* weight_wise_diff_data,
const Dtype * weight, Dtype* inter_class_dist,const Dtype* label,
const int M_) {
inter_class_dist[0] = (Dtype)0.;
weight_wise_dist_sq[0] = (Dtype)0.;
CUDA_KERNEL_LOOP(i, nthreads) {
Dtype tmp = (Dtype)0.;
for (int j = 0; j < M_; j++){
const int label_value = static_cast<int>(label[j]);
for (int k = 0; k < K_; k++){
tmp += pow((weight[i * K_ + k] - weight[label_value * K_ + k]),2);
}
}
inter_class_dist[0] += tmp / (Dtype)nthreads;
weight_wise_dist_sq[0] += tmp / (Dtype)nthreads; // store the distance
//inter_class_loss += (Dtype)1. * nthreads / (tmp + (Dtype)1e-5); //minimize inter class loss
}
}
template <typename Dtype>
__global__ void InterClassAmong_not_forward_gpu(int nthreads, const int K_, Dtype alpha_,
Dtype* weight_wise_dist_sq, Dtype* weight_wise_diff_data,
const Dtype * weight, Dtype* inter_class_dist) {
inter_class_dist[0] = (Dtype)0.;
}
//template <typename Dtype> // too slow O(n^3)
//__global__ void InterClassAmong_backward_gpu(int nthreads, const int K_, Dtype alpha_,
// const Dtype* weight, const Dtype* weight_wise_dist_sq, Dtype * weight_diff,
// const Dtype* inter_class_dist) {
// //Dtype temp_coff = pow(weight_wise_dist_sq[0],2);
// //Dtype total_coff = (Dtype)-4. * alpha_ / (temp_coff + (Dtype)1e-5);
// Dtype total_coff = (Dtype)-4. * alpha_;
// CUDA_KERNEL_LOOP(i, nthreads) {
// for (int j = 0; j < nthreads; j++){
// if(i != j){
// for (int k = 0; k < K_; k++){
// weight_diff[i * K_ + k] += (weight[i * K_ + k] - weight[j * K_ + k]) * total_coff;
// }
// }
// }
// }
//}
template <typename Dtype> // faster implement O(n^2)
__global__ void InterClassAmong_backward_gpu(int nthreads, const int K_, Dtype alpha_,
const Dtype* weight, const Dtype * weight_mean_data, const Dtype* weight_wise_dist_sq,
Dtype * weight_diff) {
Dtype temp_coff = pow(weight_wise_dist_sq[0],2);
Dtype total_coff = (Dtype)-4. * alpha_ / (temp_coff + (Dtype)1e-5);
CUDA_KERNEL_LOOP(index, nthreads) {
for (int i = 0; i < K_; i++){
weight_diff[index * K_ + i] += total_coff * (weight[index * K_ + i] - weight_mean_data[i]);
}
}
}
template <typename Dtype> // faster implement O(n^2) // only minibatch
__global__ void InterClassAmong_batch_backward_gpu(int nthreads, const int K_, Dtype alpha_,
const Dtype* weight, const Dtype * weight_mean_data, const Dtype* weight_wise_dist_sq,
Dtype * weight_diff,const Dtype* label) {
Dtype temp_coff = pow(weight_wise_dist_sq[0],2);
Dtype total_coff = (Dtype)-4. * (Dtype)alpha_/((Dtype)nthreads* temp_coff + (Dtype)1e-5);
CUDA_KERNEL_LOOP(index, nthreads) {
const int label_value = static_cast<int>(label[index]);
for (int i = 0; i < K_; i++){
weight_diff[label_value * K_ + i] += total_coff * (weight[label_value * K_ + i] - weight_mean_data[i]);
}
}
}
/****end***** Inter class type: Among ************/
template <typename Dtype>
void InterClassLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
iter_ += (Dtype)1.;
Dtype alpha_start_iter_ = this->layer_param_.inter_class_param().alpha_start_iter();
Dtype alpha_start_value_ = this->layer_param_.inter_class_param().alpha_start_value();
Dtype alpha_step_ = this->layer_param_.inter_class_param().alpha_step();
Dtype alpha_stepvalue_size = this->layer_param_.inter_class_param().alpha_stepvalue_size();
Dtype normalize_ = this->layer_param_.inter_class_param().normalize();
if (alpha_stepvalue_size != 0){
const int* alpha_stepvalue_data = alpha_stepvalues.cpu_data();
if (alpha_start_iter_ == iter_){
alpha_ = alpha_start_value_;
}
else if(alpha_start_iter_ < iter_) {
if(alpha_stepvalue_data[alpha_index_] == iter_ && alpha_index_<alpha_stepvalue_size){
alpha_ += alpha_step_;
alpha_index_ += (Dtype)1.;
}
}
}
else{
if (alpha_start_iter_ == iter_){
alpha_ = alpha_start_value_;
}
}
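// Worked example with hypothetical parameters: alpha_start_iter = 1000,
// alpha_start_value = 0.1, alpha_step = 0.05, alpha_stepvalue = {2000, 3000}.
// alpha_ becomes 0.1 at iteration 1000, 0.15 at iteration 2000, and 0.2 at
// iteration 3000; with no stepvalues it is simply set to 0.1 at iteration 1000.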
if (top.size() == 2) {
top[1]->mutable_cpu_data()[0] = alpha_;
}
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int nthreads = N_;
switch (type_) {
case InterClassParameter_InterClassType_MEAN:{
// weight_mean_ = (\Sigma w) / N_
nthreads = N_;
Weight_mean_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, N_, weight,
weight_mean_.mutable_gpu_data());
// weight_mean_norm = weight_mean_ / ||weight_mean_||
if(normalize_){
nthreads=K_;
Weight_mean_normA_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, temp_mean_norm_gpu.mutable_gpu_data(),
weight_mean_.mutable_gpu_data());
Weight_mean_normB_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, temp_mean_norm_gpu.mutable_gpu_data(),
weight_mean_.mutable_gpu_data());
}
// compute inter_class_dist
if(iter_ % 10 == 1){
nthreads = M_;
const Dtype* weight_mean_data = weight_mean_.gpu_data();
InterClassMean_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, alpha_, label, weight_mean_data,
weight, top_data);
}
else{
nthreads = M_;
const Dtype* weight_mean_data = weight_mean_.gpu_data();
// do not compute inter_class_dist
InterClassMean_not_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, alpha_, label, weight_mean_data,
weight, top_data);
}
break;
}
case InterClassParameter_InterClassType_AMONG:{
nthreads = N_;
Weight_mean_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, N_, weight,
weight_mean_.mutable_gpu_data());
if(normalize_){
nthreads=K_;
Weight_mean_normA_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, temp_mean_norm_gpu.mutable_gpu_data(),
weight_mean_.mutable_gpu_data());
Weight_mean_normB_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, temp_mean_norm_gpu.mutable_gpu_data(),
weight_mean_.mutable_gpu_data());
}
if(iter_ % 10 == 1){ // iter_size == 1
// Forward propagation of this term is very slow, so the same inter-class distance
// is reused for 10 iterations of back-propagation
//if(iter_%20==1 ||iter_%20==2){ // iter_size == 2
//compute inter_class_dist
nthreads = N_;
// computing all weights is very slow
//InterClassAmong_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
// CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, alpha_, weight_wise_dist_sq_.mutable_gpu_data(),
// weight_wise_diff_.mutable_gpu_data(), weight, top_data);
// computing weights of minibatch approximates computing all weights
InterClassAmong_batch_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, alpha_, weight_wise_dist_sq_.mutable_gpu_data(),
weight_wise_diff_.mutable_gpu_data(), weight, top_data,label,M_);
}//
else{
// do not recompute inter_class_dist;
// reuse the value stored in weight_wise_dist_sq for 10 iterations of back-propagation
nthreads = N_;
InterClassAmong_not_forward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, alpha_, weight_wise_dist_sq_.mutable_gpu_data(),
weight_wise_diff_.mutable_gpu_data(), weight, top_data);
}
break;
}
default:{
LOG(FATAL) << "Unknown InterClassType.";
}
}
}
template <typename Dtype>
void InterClassLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
if (this->param_propagate_down_[0]) {
switch (type_) {
case InterClassParameter_InterClassType_MEAN:{
const Dtype* weight_mean_data = weight_mean_.gpu_data();
int nthreads = M_;
// maximize inter_class_dist
InterClassMean_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, alpha_, label,
weight_mean_data, weight_diff);
break;
}
case InterClassParameter_InterClassType_AMONG:{
int nthreads = N_;
// maximize inter_class_dist O(n^3)
//InterClassAmong_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
// CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, alpha_, weight, weight_wise_dist_sq_.gpu_data(),
// weight_diff, top_data);
// maximize inter_class_dist O(n^2)
//InterClassAmong_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
// CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, alpha_, weight, weight_mean_.gpu_data(),
// weight_wise_dist_sq_.gpu_data(), weight_diff);
// only update minibatch inter_class_dist
nthreads = M_;
const Dtype* weight_mean_data = weight_mean_.gpu_data();
InterClassAmong_batch_backward_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, alpha_, weight, weight_mean_.gpu_data(),
weight_wise_dist_sq_.gpu_data(), weight_diff, label);
break;
}
default:{
LOG(FATAL) << "Unknown InterClassType.";
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(InterClassLayer);
} // namespace caffe
// Nearest lower power of 2
__device__ __inline__ uint flp2 (uint x)
{
return (0x80000000u >> __clz(x));
}
//Computes the squared difference between two numbers
template<typename T>
__device__ __inline__ T L2p2(const T i1, const T i2)
{
T diff = i1 - i2;
return diff*diff;
}
/*
Adds new patch to patch stack (only N most similar are kept)
Note: Stack is just an array, not FIFO
*/
__device__
void add_to_matched_image(
uint *stack, //IN/OUT: Stack of N patches matched to current reference patch
uchar *num_patches_in_stack,//IN/OUT: Number of patches in stack
const uint value, //IN: [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..]
const Params & params //IN: Denoising parameters
)
{
//stack[*num_patches_in_stack-1] is most similar (lowest number)
int k;
uchar num = (*num_patches_in_stack);
if (num < params.N) //add new value
{
k = num++;
while(k > 0 && value > stack[k-1])
{
stack[k] = stack[k-1];
--k;
}
stack[k] = value;
*num_patches_in_stack = num;
}
else if (value >= stack[0])
return;
else //delete highest value and add new
{
k = 1;
while (k < params.N && value < stack[k])
{
stack[k-1] = stack[k];
k++;
}
stack[k-1] = value;
}
}
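/*
Hedged illustration (not part of the original file) of the record format produced in
block_matching below and consumed above: the high 16 bits hold the scaled patch
distance, the low 16 bits hold the relative (y, x) displacement, one signed byte each.
The helper name is an assumption.
*/
__device__ __inline__ uint pack_match_record(uint scaled_diff, int loc_y, int loc_x)
{
	return (scaled_diff << 16u) | (((uint)(loc_y & 0xFF)) << 8u) | ((uint)(loc_x & 0xFF));
}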
/*
Block-matching algorithm
For each processed reference patch it finds at most N similar patches that pass the distance threshold and stores them in the g_stacks array.
It also returns the number of them for each reference patch in g_num_patches_in_stack.
Used denoising parameters: n,k,N,T,p
Division: the kernel handles gridDim.y lines starting with the line passed as an argument. Each block handles warpSize reference patches in a line.
Each thread processes one reference patch. All the warps of a block process the same reference patches.
*/
__global__
void block_matching(
const uchar* __restrict image, //IN: Original image
ushort* __restrict g_stacks, //OUT: For each reference patch contains addresses of similar patches (patch is adressed by top left corner) [..LOC_Y(sbyte)..|..LOC_X(sbyte)..]
uint* __restrict g_num_patches_in_stack, //OUT: For each reference patch contains number of similar patches
const uint2 image_dim, //IN: Image dimensions
const uint2 stacks_dim, //IN: Size of area, where reference patches could be located
const Params params, //IN: Denoising parameters
const uint2 start_point) //IN: Address of the top-left reference patch of a batch
{
//One block processes warpSize reference patches (each warp computes the distances of the same warpSize patches against differently displaced candidate patches)
int tid = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
int num_warps = blockDim.x/warpSize;
//p_block denotes reference rectangle on which current cuda block is computing
uint p_rectangle_width = ((warpSize-1) * params.p) + params.k;
uint p_rectangle_start = start_point.x + blockIdx.x * warpSize * params.p;
//Shared arrays
extern __shared__ uint s_data[];
uint *s_diff = s_data; //SIZE: p_rectangle_width*num_warps
uint *s_stacks = &s_data[p_rectangle_width*num_warps]; //SIZE: params.N*num_warps*warpSize
uchar *s_patches_in_stack = (uchar*)&s_data[num_warps*(p_rectangle_width + params.N*warpSize)]; //SIZE: num_warps*warpSize
uchar *s_image_p = (uchar*)&s_patches_in_stack[num_warps*warpSize]; //SIZE: p_rectangle_width*params.k
s_diff += idx2(0, wid, p_rectangle_width);
//Initialize s_patches_in_stack to zero
s_patches_in_stack[ idx2(tid, wid, warpSize) ] = 0;
int2 p; //Address of reference patch
int2 q; //Address of patch against which the difference is computed
p.x = p_rectangle_start + (tid*params.p);
p.y = start_point.y + (blockIdx.y*params.p);
//Ensure that the bottom-most patches will be taken as reference patches regardless of the p parameter.
if (p.y >= stacks_dim.y && p.y < stacks_dim.y + params.p - 1)
p.y = stacks_dim.y - 1;
else if (p.y >= stacks_dim.y) return;
//Ensure that the right-most patches will be taken as reference patches regardless of the p parameter.
uint inner_p_x = tid*params.p;
if (p.x >= stacks_dim.x && p.x < stacks_dim.x + params.p - 1)
{
inner_p_x -= (p.x - (stacks_dim.x - 1));
p.x = stacks_dim.x - 1;
}
//Load reference patches needed by actual block to shared memory
for(int i = threadIdx.x; i < p_rectangle_width*params.k; i+=blockDim.x)
{
int sx = i % p_rectangle_width;
int sy = i / p_rectangle_width;
if (p_rectangle_start+sx >= image_dim.x) continue;
s_image_p[i] = image[idx2(p_rectangle_start+sx,p.y+sy,image_dim.x)];
}
__syncthreads();
//scale difference so that it can fit ushort
uint shift = (__clz(params.Tn) < 16u) ? 16u - (uint)__clz(params.Tn) : 0;
//Ensure that displaced patch coordinates (q) will be positive
int2 from;
from.y = (p.y - (int)params.n < 0) ? -p.y : -(int)params.n;
from.x = (((int)p_rectangle_start) - (int)params.n < 0) ? -((int)p_rectangle_start) : -(int)params.n;
from.x += wid;
//For each displacement (x,y) in n neighbourhood
for(int y = from.y; y <= (int)params.n; ++y)
{
q.y = p.y + y;
if (q.y >= stacks_dim.y) break;
for(int x = from.x; x <= (int)params.n; x += num_warps)
{
//Reference patch is always the most similar to itself (there is no need to compute it)
if (x == 0 && y == 0) continue;
//Each warp is computing the same patch with slightly different displacement.
//Compute distance of reference patch p from current patch q which is displaced by (x+tid,y)
//q_block denotes displaced rectangle which is processed by the current warp
uint q_rectangle_start = p_rectangle_start + x;
q.x = q_rectangle_start + inner_p_x;
//Compute distance for each column of reference patch
for(uint i = tid; i < p_rectangle_width && p_rectangle_start+i < image_dim.x &&
q_rectangle_start+i < image_dim.x; i+=warpSize)
{
uint dist = 0;
for(uint iy = 0; iy < params.k; ++iy)
{
dist += L2p2((int)s_image_p[ idx2(i, iy, p_rectangle_width) ],
(int)image[ idx2(q_rectangle_start+i, q.y+iy, image_dim.x) ]);
}
s_diff[i] = dist;
}
if (p.x >= stacks_dim.x || q.x >= stacks_dim.x) continue;
//Sum column distances to obtain patch distance
uint diff = 0;
for (uint i = 0; i < params.k; ++i)
diff += s_diff[inner_p_x + i];
//Distance threshold
if(diff < params.Tn)
{
uint loc_y = (uint)((q.y - p.y) & 0xFF); //relative location y (-127 to 127)
uint loc_x = (uint)((q.x - p.x) & 0xFF); //relative location x (-127 to 127)
diff >>= shift;
diff <<= 16u; // [..DIFF(ushort)..|..LOC_Y(sbyte)..|..LOC_X(sbyte)..]
diff |= (loc_y << 8u);
diff |= loc_x;
//Add current patch to s_stacks
add_to_matched_image(
&s_stacks[ params.N * idx2(tid, wid, warpSize) ],
&s_patches_in_stack[ idx2(tid, wid, warpSize) ],
diff,
params
);
}
}
}
__syncthreads();
uint batch_size = gridDim.x*warpSize;
uint block_address_x = blockIdx.x*warpSize+tid;
if (wid > 0) return;
//Select N most similar patches for each reference patch from stacks in shared memory and save them to global memory
//Each thread represents one reference patch
//Each thread will find N most similar blocks in num_warps stacks (which were computed by different warps) and save them into global memory
//In shared memory the most similar patch is at the end, in global memory the order does not matter
//DEV: performance impact cca 8%
if (p.x >= stacks_dim.x) return;
int j;
for (j = 0; j < params.N; ++j)
{
uint count = 0;
uint minIdx = 0;
uint minVal = 0xFFFFFFFF; //INF
//Finds patch with minimal value of remaining
for (int i = minIdx; i < num_warps; ++i)
{
count = (uint)s_patches_in_stack[ idx2(tid, i, warpSize) ];
if (count == 0) continue;
uint newMinVal = s_stacks[ idx3(count-1,tid,i,params.N,warpSize) ];
if (newMinVal < minVal)
{
minVal = newMinVal;
minIdx = i;
}
}
if (minVal == 0xFFFFFFFF) break; //All stacks are empty
//Remove patch from shared stack
s_patches_in_stack[ idx2(tid, minIdx, warpSize) ]--;
//Adds patch to stack in global memory
g_stacks[idx3(j, block_address_x, blockIdx.y, params.N, batch_size)] = (ushort)(minVal & 0xFFFF);
}
//Save to the global memory the number of similar patches rounded to the nearest lower power of two
g_num_patches_in_stack[ idx2(block_address_x ,blockIdx.y, batch_size) ] = flp2((uint)j+1)-1;
}
extern "C" void run_block_matching(
const uchar* __restrict image, //Original image
ushort* __restrict stacks, //For each reference patch contains addresses of similar patches (patch is adressed by top left corner)
uint* __restrict num_patches_in_stack, //For each reference patch contains number of similar patches
const uint2 image_dim, //Image dimensions
const uint2 stacks_dim, //size of area where reference patches could be located
const Params params, //Denoising parameters
const uint2 start_point, //Address of the top-left reference patch of a batch
const dim3 num_threads,
const dim3 num_blocks,
const uint shared_memory_size
)
{
hipLaunchKernelGGL(block_matching, num_blocks, num_threads, shared_memory_size, 0,
image,
stacks,
num_patches_in_stack,
image_dim,
stacks_dim,
params,
start_point
);
}
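// Hedged helper (an assumption, not part of the original API): computes the dynamic
// shared-memory size implied by the layout declared at the top of block_matching
// (s_diff and s_stacks in uints, followed by s_patches_in_stack and s_image_p in
// uchars). It assumes the usual 32-thread warp size.
extern "C" uint block_matching_shared_size(
	const Params params,
	const dim3 num_threads
)
{
	const uint warp_size = 32;
	uint num_warps = num_threads.x / warp_size;
	uint p_rectangle_width = ((warp_size - 1) * params.p) + params.k;
	uint num_uints = num_warps * (p_rectangle_width + params.N * warp_size); // s_diff + s_stacks
	uint num_uchars = num_warps * warp_size          // s_patches_in_stack
	                + p_rectangle_width * params.k;  // s_image_p
	return num_uints * (uint)sizeof(uint) + num_uchars;
}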
/**
* \file
* Operations for reading linear tiles of data into the CUDA thread block.
*/
#pragma once
#include <iterator>
#include <type_traits>
#include "block_exchange.cuh"
#include "../iterator/cache_modified_input_iterator.cuh"
#include "../config.cuh"
#include "../util_ptx.cuh"
#include "../util_type.cuh"
CUB_NAMESPACE_BEGIN
/**
* \addtogroup UtilIo
* @{
*/
/******************************************************************//**
* \name Blocked arrangement I/O (direct)
*********************************************************************/
//@{
/**
* \brief Load a linear segment of items into a blocked arrangement across the thread block.
*
* \blocked
*
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator.
*/
template <
typename InputT,
int ITEMS_PER_THREAD,
typename InputIteratorT>
__device__ __forceinline__ void LoadDirectBlocked(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
// Load directly in thread-blocked order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
items[ITEM] = block_itr[(linear_tid * ITEMS_PER_THREAD) + ITEM];
}
}
/**
* \brief Load a linear segment of items into a blocked arrangement across the thread block, guarded by range.
*
* \blocked
*
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator.
*/
template <
typename InputT,
int ITEMS_PER_THREAD,
typename InputIteratorT>
__device__ __forceinline__ void LoadDirectBlocked(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
if ((linear_tid * ITEMS_PER_THREAD) + ITEM < valid_items)
{
items[ITEM] = block_itr[(linear_tid * ITEMS_PER_THREAD) + ITEM];
}
}
}
/**
* \brief Load a linear segment of items into a blocked arrangement across the thread block, guarded by range, with a fall-back assignment of out-of-bound elements.
*
* \blocked
*
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator.
*/
template <
typename InputT,
typename DefaultT,
int ITEMS_PER_THREAD,
typename InputIteratorT>
__device__ __forceinline__ void LoadDirectBlocked(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
items[ITEM] = oob_default;
LoadDirectBlocked(linear_tid, block_itr, items, valid_items);
}
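/**
 * Hedged usage sketch (not part of the original header): a single-block kernel that
 * performs a guarded blocked load with an out-of-bounds default of zero and writes
 * each thread's partial sum. The kernel name and output convention are illustrative
 * assumptions.
 */
template <
    int             ITEMS_PER_THREAD,
    typename        T,
    typename        InputIteratorT>
__global__ void ExampleLoadDirectBlockedKernel(
    InputIteratorT  d_in,           ///< [in] Tile of input data for this thread block
    int             valid_items,    ///< [in] Number of valid items in the tile
    T               *d_thread_sums) ///< [out] One partial sum per thread
{
    int linear_tid = threadIdx.x;
    // Guarded blocked load; out-of-bounds items are filled with T(0)
    T items[ITEMS_PER_THREAD];
    LoadDirectBlocked(linear_tid, d_in, items, valid_items, T(0));
    // Each thread reduces its own (consecutive) items
    T thread_sum = T(0);
    #pragma unroll
    for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
        thread_sum += items[ITEM];
    d_thread_sums[linear_tid] = thread_sum;
}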
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* Internal implementation for load vectorization
*/
template <
CacheLoadModifier MODIFIER,
typename T,
int ITEMS_PER_THREAD>
__device__ __forceinline__ void InternalLoadDirectBlockedVectorized(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
T *block_ptr, ///< [in] Input pointer for loading from
T (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
// Biggest memory access word that T is a whole multiple of
typedef typename UnitWord<T>::DeviceWord DeviceWord;
enum
{
TOTAL_WORDS = sizeof(items) / sizeof(DeviceWord),
VECTOR_SIZE = (TOTAL_WORDS % 4 == 0) ?
4 :
(TOTAL_WORDS % 2 == 0) ?
2 :
1,
VECTORS_PER_THREAD = TOTAL_WORDS / VECTOR_SIZE,
};
// Vector type
typedef typename CubVector<DeviceWord, VECTOR_SIZE>::Type Vector;
// Vector items
Vector vec_items[VECTORS_PER_THREAD];
// Aliased input ptr
Vector* vec_ptr = reinterpret_cast<Vector*>(block_ptr) + (linear_tid * VECTORS_PER_THREAD);
// Load directly in thread-blocked order
#pragma unroll
for (int ITEM = 0; ITEM < VECTORS_PER_THREAD; ITEM++)
{
vec_items[ITEM] = ThreadLoad<MODIFIER>(vec_ptr + ITEM);
}
// Copy
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
items[ITEM] = *(reinterpret_cast<T*>(vec_items) + ITEM);
}
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Load a linear segment of items into a blocked arrangement across the thread block.
*
* \blocked
*
* The input offset (\p block_ptr + \p block_offset) must be quad-item aligned
*
* The following conditions will prevent vectorization and loading will fall back to cub::BLOCK_LOAD_DIRECT:
* - \p ITEMS_PER_THREAD is odd
* - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.)
*
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
*/
template <
typename T,
int ITEMS_PER_THREAD>
__device__ __forceinline__ void LoadDirectBlockedVectorized(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
T *block_ptr, ///< [in] Input pointer for loading from
T (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
InternalLoadDirectBlockedVectorized<LOAD_DEFAULT>(linear_tid, block_ptr, items);
}
//@} end member group
/******************************************************************//**
* \name Striped arrangement I/O (direct)
*********************************************************************/
//@{
/**
* \brief Load a linear segment of items into a striped arrangement across the thread block.
*
* \striped
*
* \tparam BLOCK_THREADS The thread block size in threads
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator.
*/
template <
int BLOCK_THREADS,
typename InputT,
int ITEMS_PER_THREAD,
typename InputIteratorT>
__device__ __forceinline__ void LoadDirectStriped(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
items[ITEM] = block_itr[linear_tid + ITEM * BLOCK_THREADS];
}
}
/**
* \brief Load a linear segment of items into a striped arrangement across the thread block, guarded by range
*
* \striped
*
* \tparam BLOCK_THREADS The thread block size in threads
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator.
*/
template <
int BLOCK_THREADS,
typename InputT,
int ITEMS_PER_THREAD,
typename InputIteratorT>
__device__ __forceinline__ void LoadDirectStriped(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
if (linear_tid + (ITEM * BLOCK_THREADS) < valid_items)
{
items[ITEM] = block_itr[linear_tid + ITEM * BLOCK_THREADS];
}
}
}
/**
* \brief Load a linear segment of items into a striped arrangement across the thread block, guarded by range, with a fall-back assignment of out-of-bound elements.
*
* \striped
*
* \tparam BLOCK_THREADS The thread block size in threads
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator.
*/
template <
int BLOCK_THREADS,
typename InputT,
typename DefaultT,
int ITEMS_PER_THREAD,
typename InputIteratorT>
__device__ __forceinline__ void LoadDirectStriped(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
items[ITEM] = oob_default;
LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, valid_items);
}
//@} end member group
/******************************************************************//**
* \name Warp-striped arrangement I/O (direct)
*********************************************************************/
//@{
/**
* \brief Load a linear segment of items into a warp-striped arrangement across the thread block.
*
* \warpstriped
*
* \par Usage Considerations
* The number of threads in the thread block must be a multiple of the architecture's warp size.
*
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator.
*/
template <
typename InputT,
int ITEMS_PER_THREAD,
typename InputIteratorT>
__device__ __forceinline__ void LoadDirectWarpStriped(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1);
int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS;
int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;
// Load directly in warp-striped order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
new(&items[ITEM]) InputT(block_itr[warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS)]);
}
}
/**
* \brief Load a linear segment of items into a warp-striped arrangement across the thread block, guarded by range
*
* \warpstriped
*
* \par Usage Considerations
* The number of threads in the thread block must be a multiple of the architecture's warp size.
*
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator.
*/
template <
typename InputT,
int ITEMS_PER_THREAD,
typename InputIteratorT>
__device__ __forceinline__ void LoadDirectWarpStriped(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1);
int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS;
int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;
// Load directly in warp-striped order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
if (warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS) < valid_items)
{
new(&items[ITEM]) InputT(block_itr[warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS)]);
}
}
}
/**
* \brief Load a linear segment of items into a warp-striped arrangement across the thread block, guarded by range, with a fall-back assignment of out-of-bound elements.
*
* \warpstriped
*
* \par Usage Considerations
* The number of threads in the thread block must be a multiple of the architecture's warp size.
*
* \tparam T <b>[inferred]</b> The data type to load.
* \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
* \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator.
*/
template <
typename InputT,
typename DefaultT,
int ITEMS_PER_THREAD,
typename InputIteratorT>
__device__ __forceinline__ void LoadDirectWarpStriped(
int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks)
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
// Load directly in warp-striped order
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
items[ITEM] = oob_default;
LoadDirectWarpStriped(linear_tid, block_itr, items, valid_items);
}
//@} end member group
/** @} */ // end group UtilIo
//-----------------------------------------------------------------------------
// Generic BlockLoad abstraction
//-----------------------------------------------------------------------------
/**
* \brief cub::BlockLoadAlgorithm enumerates alternative algorithms for cub::BlockLoad to read a linear segment of data from memory into a blocked arrangement across a CUDA thread block.
*/
enum BlockLoadAlgorithm
{
/**
* \par Overview
*
* A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is read
* directly from memory.
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) decreases as the
* access stride between threads increases (i.e., the number of items per thread).
*/
BLOCK_LOAD_DIRECT,
/**
* \par Overview
*
* A [<em>striped arrangement</em>](index.html#sec5sec3) of data is read
* directly from memory.
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) decreases as the
* access stride between threads increases (i.e., the number of items per thread).
*/
BLOCK_LOAD_STRIPED,
/**
* \par Overview
*
* A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is read
* from memory using CUDA's built-in vectorized loads as a coalescing optimization.
* For example, <tt>ld.global.v4.s32</tt> instructions will be generated
* when \p T = \p int and \p ITEMS_PER_THREAD % 4 == 0.
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) remains high until the
* access stride between threads (i.e., the number of items per thread) exceeds the
* maximum vector load width (typically 4 items or 64B, whichever is lower).
* - The following conditions will prevent vectorization and loading will fall back to cub::BLOCK_LOAD_DIRECT:
* - \p ITEMS_PER_THREAD is odd
 * - The \p InputIteratorT is not a simple pointer type
* - The block input offset is not quadword-aligned
* - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.)
*/
BLOCK_LOAD_VECTORIZE,
/**
* \par Overview
*
* A [<em>striped arrangement</em>](index.html#sec5sec3) of data is read
* efficiently from memory and then locally transposed into a
* [<em>blocked arrangement</em>](index.html#sec5sec3).
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) remains high regardless
* of items loaded per thread.
 * - The local reordering incurs slightly longer latencies and lower throughput than the
* direct cub::BLOCK_LOAD_DIRECT and cub::BLOCK_LOAD_VECTORIZE alternatives.
*/
BLOCK_LOAD_TRANSPOSE,
/**
* \par Overview
*
* A [<em>warp-striped arrangement</em>](index.html#sec5sec3) of data is
* read efficiently from memory and then locally transposed into a
* [<em>blocked arrangement</em>](index.html#sec5sec3).
*
* \par Usage Considerations
* - BLOCK_THREADS must be a multiple of WARP_THREADS
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) remains high regardless
* of items loaded per thread.
* - The local reordering incurs slightly larger latencies than the
* direct cub::BLOCK_LOAD_DIRECT and cub::BLOCK_LOAD_VECTORIZE alternatives.
* - Provisions more shared storage, but incurs smaller latencies than the
* BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED alternative.
*/
BLOCK_LOAD_WARP_TRANSPOSE,
/**
* \par Overview
*
* Like \p BLOCK_LOAD_WARP_TRANSPOSE, a [<em>warp-striped arrangement</em>](index.html#sec5sec3)
* of data is read directly from memory and then is locally transposed into a
* [<em>blocked arrangement</em>](index.html#sec5sec3). To reduce the shared memory
* requirement, only one warp's worth of shared memory is provisioned and is
* subsequently time-sliced among warps.
*
* \par Usage Considerations
* - BLOCK_THREADS must be a multiple of WARP_THREADS
*
* \par Performance Considerations
* - The utilization of memory transactions (coalescing) remains high regardless
* of items loaded per thread.
* - Provisions less shared memory temporary storage, but incurs larger
* latencies than the BLOCK_LOAD_WARP_TRANSPOSE alternative.
*/
BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED,
};
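/*
 * Illustrative sketch of selecting one of the policies above (the block size
 * and items-per-thread values are assumptions for the example):
 *
 *     // No shared memory; each thread reads its own blocked segment.
 *     typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_DIRECT>         DirectLoadT;
 *     // Attempts ld.global.v4 vectorization when given a raw pointer.
 *     typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_VECTORIZE>      VectorLoadT;
 *     // Coalesced warp-striped reads followed by a shared-memory transpose.
 *     typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> TransposeLoadT;
 */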
/**
* \brief The BlockLoad class provides [<em>collective</em>](index.html#sec0) data movement methods for loading a linear segment of items from memory into a [<em>blocked arrangement</em>](index.html#sec5sec3) across a CUDA thread block. 
* \ingroup BlockModule
* \ingroup UtilIo
*
* \tparam InputT The data type to read into (which must be convertible from the input iterator's value type).
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam ITEMS_PER_THREAD The number of consecutive items partitioned onto each thread.
* \tparam ALGORITHM <b>[optional]</b> cub::BlockLoadAlgorithm tuning policy. default: cub::BLOCK_LOAD_DIRECT.
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
* \tparam PTX_ARCH <b>[optional]</b> \ptxversion
*
* \par Overview
* - The BlockLoad class provides a single data movement abstraction that can be specialized
* to implement different cub::BlockLoadAlgorithm strategies. This facilitates different
* performance policies for different architectures, data types, granularity sizes, etc.
* - BlockLoad can be optionally specialized by different data movement strategies:
* -# <b>cub::BLOCK_LOAD_DIRECT</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3)
* of data is read directly from memory. [More...](\ref cub::BlockLoadAlgorithm)
 * -# <b>cub::BLOCK_LOAD_STRIPED</b>. A [<em>striped arrangement</em>](index.html#sec5sec3)
* of data is read directly from memory. [More...](\ref cub::BlockLoadAlgorithm)
* -# <b>cub::BLOCK_LOAD_VECTORIZE</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3)
* of data is read directly from memory using CUDA's built-in vectorized loads as a
* coalescing optimization. [More...](\ref cub::BlockLoadAlgorithm)
* -# <b>cub::BLOCK_LOAD_TRANSPOSE</b>. A [<em>striped arrangement</em>](index.html#sec5sec3)
* of data is read directly from memory and is then locally transposed into a
* [<em>blocked arrangement</em>](index.html#sec5sec3). [More...](\ref cub::BlockLoadAlgorithm)
* -# <b>cub::BLOCK_LOAD_WARP_TRANSPOSE</b>. A [<em>warp-striped arrangement</em>](index.html#sec5sec3)
* of data is read directly from memory and is then locally transposed into a
* [<em>blocked arrangement</em>](index.html#sec5sec3). [More...](\ref cub::BlockLoadAlgorithm)
 * -# <b>cub::BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED</b>. A [<em>warp-striped arrangement</em>](index.html#sec5sec3)
* of data is read directly from memory and is then locally transposed into a
* [<em>blocked arrangement</em>](index.html#sec5sec3) one warp at a time. [More...](\ref cub::BlockLoadAlgorithm)
* - \rowmajor
*
* \par A Simple Example
* \blockcollective{BlockLoad}
* \par
* The code snippet below illustrates the loading of a linear
* segment of 512 integers into a "blocked" arrangement across 128 threads where each
* thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE,
* meaning memory references are efficiently coalesced using a warp-striped access
* pattern (after which items are locally reordered among threads).
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_load.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
*
* // Allocate shared memory for BlockLoad
* __shared__ typename BlockLoad::TempStorage temp_storage;
*
* // Load a segment of consecutive items that are blocked across threads
* int thread_data[4];
* BlockLoad(temp_storage).Load(d_data, thread_data);
*
* \endcode
* \par
* Suppose the input \p d_data is <tt>0, 1, 2, 3, 4, 5, ...</tt>.
 * The set of \p thread_data across the block of threads will be
* <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>.
*
 * \par Re-using dynamically allocated shared memory
* The following example under the examples/block folder illustrates usage of
* dynamically shared memory with BlockReduce and how to re-purpose
* the same memory region:
* <a href="../../examples/block/example_block_reduce_dyn_smem.cu">example_block_reduce_dyn_smem.cu</a>
*
* This example can be easily adapted to the storage required by BlockLoad.
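 *
 * \par
 * A minimal sketch of that adaptation (the kernel name and the assumption of a
 * 1D, 128-thread block are illustrative):
 * \code
 * __global__ void DynSmemKernel(int *d_data)
 * {
 *     typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
 *
 *     // Reinterpret the dynamically allocated shared memory as TempStorage
 *     extern __shared__ char smem[];
 *     BlockLoad::TempStorage &temp_storage =
 *         *reinterpret_cast<BlockLoad::TempStorage*>(smem);
 *
 *     int thread_data[4];
 *     BlockLoad(temp_storage).Load(d_data, thread_data);
 * }
 * // Launched with at least sizeof(BlockLoad::TempStorage) bytes of dynamic shared memory.
 * \endcode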
*/
template <
typename InputT,
int BLOCK_DIM_X,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm ALGORITHM = BLOCK_LOAD_DIRECT,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1,
int PTX_ARCH = CUB_PTX_ARCH>
class BlockLoad
{
private:
/******************************************************************************
 * Constants and type definitions
******************************************************************************/
/// Constants
enum
{
/// The thread block size in threads
BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
};
/******************************************************************************
* Algorithmic variants
******************************************************************************/
/// Load helper
template <BlockLoadAlgorithm _POLICY, int DUMMY>
struct LoadInternal;
/**
* BLOCK_LOAD_DIRECT specialization of load helper
*/
template <int DUMMY>
struct LoadInternal<BLOCK_LOAD_DIRECT, DUMMY>
{
/// Shared memory storage layout type
typedef NullType TempStorage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ LoadInternal(
TempStorage &/*temp_storage*/,
int linear_tid)
:
linear_tid(linear_tid)
{}
/// Load a linear segment of items from memory
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
LoadDirectBlocked(linear_tid, block_itr, items);
}
/// Load a linear segment of items from memory, guarded by range
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
LoadDirectBlocked(linear_tid, block_itr, items, valid_items);
}
/// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements
template <typename InputIteratorT, typename DefaultT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default);
}
};
/**
* BLOCK_LOAD_STRIPED specialization of load helper
*/
template <int DUMMY>
struct LoadInternal<BLOCK_LOAD_STRIPED, DUMMY>
{
/// Shared memory storage layout type
typedef NullType TempStorage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ LoadInternal(
TempStorage &/*temp_storage*/,
int linear_tid)
:
linear_tid(linear_tid)
{}
/// Load a linear segment of items from memory
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
        InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items);
}
/// Load a linear segment of items from memory, guarded by range
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, valid_items);
}
/// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements
template <typename InputIteratorT, typename DefaultT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, valid_items, oob_default);
}
};
/**
* BLOCK_LOAD_VECTORIZE specialization of load helper
*/
template <int DUMMY>
struct LoadInternal<BLOCK_LOAD_VECTORIZE, DUMMY>
{
/// Shared memory storage layout type
typedef NullType TempStorage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ LoadInternal(
TempStorage &/*temp_storage*/,
int linear_tid)
:
linear_tid(linear_tid)
{}
/// Load a linear segment of items from memory, specialized for native pointer types (attempts vectorization)
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputT *block_ptr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
InternalLoadDirectBlockedVectorized<LOAD_DEFAULT>(linear_tid, block_ptr, items);
}
/// Load a linear segment of items from memory, specialized for native pointer types (attempts vectorization)
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
const InputT *block_ptr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
InternalLoadDirectBlockedVectorized<LOAD_DEFAULT>(linear_tid, block_ptr, items);
}
/// Load a linear segment of items from memory, specialized for native pointer types (attempts vectorization)
template <
CacheLoadModifier MODIFIER,
typename ValueType,
typename OffsetT>
__device__ __forceinline__ void Load(
CacheModifiedInputIterator<MODIFIER, ValueType, OffsetT> block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
InternalLoadDirectBlockedVectorized<MODIFIER>(linear_tid, block_itr.ptr, items);
}
/// Load a linear segment of items from memory, specialized for opaque input iterators (skips vectorization)
template <typename _InputIteratorT>
__device__ __forceinline__ void Load(
_InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
LoadDirectBlocked(linear_tid, block_itr, items);
}
/// Load a linear segment of items from memory, guarded by range (skips vectorization)
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
LoadDirectBlocked(linear_tid, block_itr, items, valid_items);
}
/// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements (skips vectorization)
template <typename InputIteratorT, typename DefaultT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default);
}
};
/**
* BLOCK_LOAD_TRANSPOSE specialization of load helper
*/
template <int DUMMY>
struct LoadInternal<BLOCK_LOAD_TRANSPOSE, DUMMY>
{
// BlockExchange utility type for keys
typedef BlockExchange<InputT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange;
/// Shared memory storage layout type
struct _TempStorage : BlockExchange::TempStorage
{};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ LoadInternal(
TempStorage &temp_storage,
int linear_tid)
:
temp_storage(temp_storage.Alias()),
linear_tid(linear_tid)
{}
/// Load a linear segment of items from memory
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
        InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items);
BlockExchange(temp_storage).StripedToBlocked(items, items);
}
/// Load a linear segment of items from memory, guarded by range
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, valid_items);
BlockExchange(temp_storage).StripedToBlocked(items, items);
}
/// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements
template <typename InputIteratorT, typename DefaultT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, valid_items, oob_default);
BlockExchange(temp_storage).StripedToBlocked(items, items);
}
};
/**
* BLOCK_LOAD_WARP_TRANSPOSE specialization of load helper
*/
template <int DUMMY>
struct LoadInternal<BLOCK_LOAD_WARP_TRANSPOSE, DUMMY>
{
enum
{
WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH)
};
// Assert BLOCK_THREADS must be a multiple of WARP_THREADS
CUB_STATIC_ASSERT((int(BLOCK_THREADS) % int(WARP_THREADS) == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS");
// BlockExchange utility type for keys
typedef BlockExchange<InputT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange;
/// Shared memory storage layout type
struct _TempStorage : BlockExchange::TempStorage
{};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ LoadInternal(
TempStorage &temp_storage,
int linear_tid)
:
temp_storage(temp_storage.Alias()),
linear_tid(linear_tid)
{}
/// Load a linear segment of items from memory
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
        InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
LoadDirectWarpStriped(linear_tid, block_itr, items);
BlockExchange(temp_storage).WarpStripedToBlocked(items, items);
}
/// Load a linear segment of items from memory, guarded by range
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
LoadDirectWarpStriped(linear_tid, block_itr, items, valid_items);
BlockExchange(temp_storage).WarpStripedToBlocked(items, items);
}
/// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements
template <typename InputIteratorT, typename DefaultT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
LoadDirectWarpStriped(linear_tid, block_itr, items, valid_items, oob_default);
BlockExchange(temp_storage).WarpStripedToBlocked(items, items);
}
};
/**
* BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED specialization of load helper
*/
template <int DUMMY>
struct LoadInternal<BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED, DUMMY>
{
enum
{
WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH)
};
// Assert BLOCK_THREADS must be a multiple of WARP_THREADS
CUB_STATIC_ASSERT((int(BLOCK_THREADS) % int(WARP_THREADS) == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS");
// BlockExchange utility type for keys
typedef BlockExchange<InputT, BLOCK_DIM_X, ITEMS_PER_THREAD, true, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange;
/// Shared memory storage layout type
struct _TempStorage : BlockExchange::TempStorage
{};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
/// Constructor
__device__ __forceinline__ LoadInternal(
TempStorage &temp_storage,
int linear_tid)
:
temp_storage(temp_storage.Alias()),
linear_tid(linear_tid)
{}
/// Load a linear segment of items from memory
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
        InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
LoadDirectWarpStriped(linear_tid, block_itr, items);
BlockExchange(temp_storage).WarpStripedToBlocked(items, items);
}
/// Load a linear segment of items from memory, guarded by range
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
LoadDirectWarpStriped(linear_tid, block_itr, items, valid_items);
BlockExchange(temp_storage).WarpStripedToBlocked(items, items);
}
/// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements
template <typename InputIteratorT, typename DefaultT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
LoadDirectWarpStriped(linear_tid, block_itr, items, valid_items, oob_default);
BlockExchange(temp_storage).WarpStripedToBlocked(items, items);
}
};
/******************************************************************************
* Type definitions
******************************************************************************/
/// Internal load implementation to use
typedef LoadInternal<ALGORITHM, 0> InternalLoad;
/// Shared memory storage layout type
typedef typename InternalLoad::TempStorage _TempStorage;
/******************************************************************************
* Utility methods
******************************************************************************/
/// Internal storage allocator
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/******************************************************************************
* Thread fields
******************************************************************************/
/// Thread reference to shared storage
_TempStorage &temp_storage;
/// Linear thread-id
int linear_tid;
public:
/// \smemstorage{BlockLoad}
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************//**
* \name Collective constructors
*********************************************************************/
//@{
/**
* \brief Collective constructor using a private static allocation of shared memory as temporary storage.
*/
__device__ __forceinline__ BlockLoad()
:
temp_storage(PrivateStorage()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
/**
* \brief Collective constructor using the specified memory allocation as temporary storage.
*/
__device__ __forceinline__ BlockLoad(
TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage
:
temp_storage(temp_storage.Alias()),
linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
//@} end member group
/******************************************************************//**
* \name Data movement
*********************************************************************/
//@{
/**
* \brief Load a linear segment of items from memory.
*
* \par
* - \blocked
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the loading of a linear
* segment of 512 integers into a "blocked" arrangement across 128 threads where each
* thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE,
* meaning memory references are efficiently coalesced using a warp-striped access
* pattern (after which items are locally reordered among threads).
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_load.cuh>
*
* __global__ void ExampleKernel(int *d_data, ...)
* {
* // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
*
* // Allocate shared memory for BlockLoad
* __shared__ typename BlockLoad::TempStorage temp_storage;
*
* // Load a segment of consecutive items that are blocked across threads
* int thread_data[4];
* BlockLoad(temp_storage).Load(d_data, thread_data);
*
* \endcode
* \par
* Suppose the input \p d_data is <tt>0, 1, 2, 3, 4, 5, ...</tt>.
 * The set of \p thread_data across the block of threads will be
* <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>.
*
*/
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load
{
InternalLoad(temp_storage, linear_tid).Load(block_itr, items);
}
/**
* \brief Load a linear segment of items from memory, guarded by range.
*
* \par
* - \blocked
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the guarded loading of a linear
* segment of 512 integers into a "blocked" arrangement across 128 threads where each
* thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE,
* meaning memory references are efficiently coalesced using a warp-striped access
* pattern (after which items are locally reordered among threads).
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_load.cuh>
*
* __global__ void ExampleKernel(int *d_data, int valid_items, ...)
* {
* // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
*
* // Allocate shared memory for BlockLoad
* __shared__ typename BlockLoad::TempStorage temp_storage;
*
* // Load a segment of consecutive items that are blocked across threads
* int thread_data[4];
* BlockLoad(temp_storage).Load(d_data, thread_data, valid_items);
*
* \endcode
* \par
* Suppose the input \p d_data is <tt>0, 1, 2, 3, 4, 5, 6...</tt> and \p valid_items is \p 5.
 * The set of \p thread_data across the block of threads will be
* <tt>{ [0,1,2,3], [4,?,?,?], ..., [?,?,?,?] }</tt>, with only the first two threads
* being unmasked to load portions of valid data (and other items remaining unassigned).
*
*/
template <typename InputIteratorT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items) ///< [in] Number of valid items to load
{
InternalLoad(temp_storage, linear_tid).Load(block_itr, items, valid_items);
}
/**
* \brief Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements
*
* \par
* - \blocked
* - \smemreuse
*
* \par Snippet
* The code snippet below illustrates the guarded loading of a linear
* segment of 512 integers into a "blocked" arrangement across 128 threads where each
* thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE,
* meaning memory references are efficiently coalesced using a warp-striped access
* pattern (after which items are locally reordered among threads).
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_load.cuh>
*
* __global__ void ExampleKernel(int *d_data, int valid_items, ...)
* {
* // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;
*
* // Allocate shared memory for BlockLoad
* __shared__ typename BlockLoad::TempStorage temp_storage;
*
* // Load a segment of consecutive items that are blocked across threads
* int thread_data[4];
* BlockLoad(temp_storage).Load(d_data, thread_data, valid_items, -1);
*
* \endcode
* \par
* Suppose the input \p d_data is <tt>0, 1, 2, 3, 4, 5, 6...</tt>,
* \p valid_items is \p 5, and the out-of-bounds default is \p -1.
 * The set of \p thread_data across the block of threads will be
* <tt>{ [0,1,2,3], [4,-1,-1,-1], ..., [-1,-1,-1,-1] }</tt>, with only the first two threads
* being unmasked to load portions of valid data (and other items are assigned \p -1)
*
*/
template <typename InputIteratorT, typename DefaultT>
__device__ __forceinline__ void Load(
InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from
InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load
int valid_items, ///< [in] Number of valid items to load
DefaultT oob_default) ///< [in] Default value to assign out-of-bound items
{
InternalLoad(temp_storage, linear_tid).Load(block_itr, items, valid_items, oob_default);
}
//@} end member group
};
template <class Policy,
class It,
class T = typename std::iterator_traits<It>::value_type>
struct BlockLoadType
{
using type = cub::BlockLoad<T,
Policy::BLOCK_THREADS,
Policy::ITEMS_PER_THREAD,
Policy::LOAD_ALGORITHM>;
};
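/*
 * Illustrative sketch of BlockLoadType (the AgentPolicy struct below is a
 * hypothetical policy; any type exposing these three constants works):
 *
 *     struct AgentPolicy
 *     {
 *         static const int BLOCK_THREADS    = 128;
 *         static const int ITEMS_PER_THREAD = 4;
 *         static const BlockLoadAlgorithm LOAD_ALGORITHM = BLOCK_LOAD_WARP_TRANSPOSE;
 *     };
 *     using BlockLoadT = BlockLoadType<AgentPolicy, int*>::type;  // cub::BlockLoad<int, 128, 4, ...>
 */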
CUB_NAMESPACE_END
|
the_stack
|
/*
Copyright (C) 2014-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/impl/tinythread.h"
#include "bits/imread.hpp"
#include "bits/impl/imread_helpers.hpp"
#include <vector>
#include <string>
#include <algorithm>
#include "bits/data.hpp"
#include "bits/mexutils.h"
/* option codes */
enum {
opt_num_threads = 0,
opt_prefetch,
opt_resize,
opt_verbose,
} ;
/* options */
VLMXOption options [] = {
{"NumThreads", 1, opt_num_threads },
{"Prefetch", 0, opt_prefetch },
{"Verbose", 0, opt_verbose },
{"Resize", 1, opt_resize },
{0, 0, 0 }
} ;
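/* Illustrative MATLAB-side usage of the options above (argument values are
 * examples only):
 *
 *   images = vl_imreadjpeg(files, 'NumThreads', 4, 'Resize', [256 256]) ;
 *   vl_imreadjpeg(files, 'Prefetch') ;  % start decoding in the background
 *   images = vl_imreadjpeg(files) ;     % later call picks up the cached tasks
 */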
enum {
IN_FILENAMES = 0, IN_END
} ;
enum {
OUT_IMAGES = 0, OUT_END
} ;
enum ResizeMode
{
kResizeNone,
kResizeAnisotropic,
kResizeIsotropic,
} ;
/* ---------------------------------------------------------------- */
/* Caches */
/* ---------------------------------------------------------------- */
class ImageBuffer : public vl::Image
{
public:
ImageBuffer()
: vl::Image(), hasMatlabMemory(false), isMemoryOwner(false)
{ }
ImageBuffer(ImageBuffer const & im)
: vl::Image(im), hasMatlabMemory(im.hasMatlabMemory), isMemoryOwner(false)
{ }
~ImageBuffer()
{
clear() ;
}
ImageBuffer & operator = (ImageBuffer const & imb)
{
clear() ;
vl::Image::operator=(imb) ;
hasMatlabMemory = imb.hasMatlabMemory ;
isMemoryOwner = false ;
return *this ;
}
void clear()
{
if (isMemoryOwner && memory) {
if (hasMatlabMemory) {
mxFree(memory) ;
} else {
free(memory) ;
}
}
isMemoryOwner = false ;
hasMatlabMemory = false ;
vl::Image::clear() ;
}
float * relinquishMemory()
{
float * memory_ = memory ;
isMemoryOwner = false ;
clear() ;
return memory_ ;
}
vl::ErrorCode init(vl::ImageShape const & shape_, bool matlab_)
{
clear() ;
shape = shape_ ;
isMemoryOwner = true ;
if (matlab_) {
memory = (float*)mxMalloc(sizeof(float)*shape.getNumElements()) ;
mexMakeMemoryPersistent(memory) ;
hasMatlabMemory = true ;
} else {
memory = (float*)malloc(sizeof(float)*shape.getNumElements()) ;
hasMatlabMemory = false ;
}
return vl::VLE_Success ;
}
bool hasMatlabMemory ;
bool isMemoryOwner ;
} ;
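/* Typical ImageBuffer lifecycle in this file (illustrative summary):
 *
 *   ImageBuffer buffer ;
 *   buffer.init(shape, true) ;                      // mxMalloc'd, persistent memory
 *   // ... a reader thread fills buffer.getMemory() ...
 *   mxSetData(array, buffer.relinquishMemory()) ;   // ownership passes to the mxArray
 */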
#define TASK_ERROR_MSG_MAX_LEN 1024
struct Task
{
std::string name ;
bool done ;
ImageBuffer resizedImage ;
ImageBuffer inputImage ;
vl::ErrorCode error ;
bool requireResize ;
char errorMessage [TASK_ERROR_MSG_MAX_LEN] ;
Task() { }
private:
Task(Task const &) ;
Task & operator= (Task const &) ;
} ;
typedef std::vector<Task*> Tasks ;
Tasks tasks ;
tthread::mutex tasksMutex ;
tthread::condition_variable tasksCondition ;
tthread::condition_variable completedCondition ;
int nextTaskIndex = 0 ;
int numTasksCompleted = 0 ;
typedef std::pair<tthread::thread*,vl::ImageReader*> reader_t ;
typedef std::vector<reader_t> readers_t ;
readers_t readers ;
bool terminateReaders = true ;
/* ---------------------------------------------------------------- */
/* Tasks and readers */
/* ---------------------------------------------------------------- */
void reader_function(void* reader_)
{
vl::ImageReader* reader = (vl::ImageReader*) reader_ ;
int taskIndex ;
tasksMutex.lock() ;
while (true) {
// wait for next task
while ((nextTaskIndex >= tasks.size()) && ! terminateReaders) {
tasksCondition.wait(tasksMutex);
}
if (terminateReaders) {
break ;
}
taskIndex = nextTaskIndex++ ;
Task & thisTask = *tasks[taskIndex] ;
tasksMutex.unlock() ;
if (thisTask.error == vl::VLE_Success) {
// the memory has been pre-allocated
thisTask.error = reader->readPixels(thisTask.inputImage.getMemory(), thisTask.name.c_str()) ;
if (thisTask.error != vl::VLE_Success) {
strncpy(thisTask.errorMessage, reader->getLastErrorMessage(), TASK_ERROR_MSG_MAX_LEN) ;
}
}
if ((thisTask.error == vl::VLE_Success) && thisTask.requireResize) {
vl::impl::resizeImage(thisTask.resizedImage, thisTask.inputImage) ;
}
tasksMutex.lock() ;
thisTask.done = true ;
numTasksCompleted ++ ;
completedCondition.notify_all() ;
}
tasksMutex.unlock() ;
}
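/* Summary of the protocol implemented above:
 *  - mexFunction appends Task objects and wakes readers via tasksCondition.
 *  - Each reader claims tasks[nextTaskIndex++] under tasksMutex, decodes (and
 *    optionally resizes) outside the lock, then marks the task done and
 *    signals completedCondition.
 *  - flush_tasks() and delete_readers() use the same mutex and conditions,
 *    together with terminateReaders, to drain pending work and stop the threads.
 */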
void delete_readers()
{
tasksMutex.lock() ;
terminateReaders = true ;
tasksMutex.unlock() ;
tasksCondition.notify_all() ;
for (int r = 0 ; r < (int)readers.size() ; ++r) {
readers[r].first->join() ;
delete readers[r].first ;
delete readers[r].second ;
}
readers.clear() ;
}
void create_readers(int num, int verbosity)
{
if (num <= 0) {
num = (std::max)(1, (int)readers.size()) ;
}
if (readers.size() == num) {
return ;
}
if (verbosity > 1) { mexPrintf("vl_imreadjpeg: flushing reader threads\n") ; }
delete_readers() ;
terminateReaders = false ;
for (int r = 0 ; r < num ; ++r) {
vl::ImageReader * reader = new vl::ImageReader() ;
tthread::thread * readerThread = new tthread::thread(reader_function, reader) ;
readers.push_back(reader_t(readerThread, reader)) ;
}
if (verbosity > 1) { mexPrintf("vl_imreadjpeg: created %d reader threads\n", readers.size()) ; }
}
void delete_tasks() {
for (int t = 0 ; t < (int)tasks.size() ; ++t) {
if (tasks[t]) { delete tasks[t] ; }
}
tasks.clear() ;
}
void flush_tasks() {
// wait until all tasks in the current list are complete
tasksMutex.lock() ;
while (numTasksCompleted < (int)tasks.size()) {
completedCondition.wait(tasksMutex);
}
// now delete them
delete_tasks() ;
numTasksCompleted = 0 ;
nextTaskIndex = 0 ;
tasksMutex.unlock() ;
}
void atExit()
{
delete_readers() ;
delete_tasks() ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
bool prefetch = false ;
int requestedNumThreads = -1 ;
int verbosity = 0 ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
int i ;
ResizeMode resizeMode = kResizeNone ;
int resizeWidth = 1 ;
int resizeHeight = 1 ;
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 1) {
mexErrMsgTxt("There is less than one argument.") ;
}
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_prefetch :
prefetch = true ;
break ;
case opt_resize :
if (!vlmxIsPlainVector(optarg, -1)) {
mexErrMsgTxt("RESIZE is not a plain vector.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1 :
resizeMode = kResizeIsotropic ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
          // resizeWidth otherwise keeps the dummy value 1
break ;
case 2 :
resizeMode = kResizeAnisotropic ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[1] ;
break;
default:
mexErrMsgTxt("RESIZE does not have one or two dimensions.") ;
break ;
}
if (resizeHeight < 1 || resizeWidth < 1) {
mexErrMsgTxt("An element of RESIZE is smaller than one.") ;
}
break ;
case opt_num_threads :
requestedNumThreads = (int)mxGetScalar(optarg) ;
break ;
}
}
if (!mxIsCell(in[IN_FILENAMES])) {
mexErrMsgTxt("FILENAMES is not a cell array of strings.") ;
}
// prepare reader tasks
create_readers(requestedNumThreads, verbosity) ;
if (verbosity) {
mexPrintf("vl_imreadjpeg: numThreads = %d, prefetch = %d\n",
readers.size(), prefetch) ;
switch (resizeMode) {
case kResizeIsotropic:
mexPrintf("vl_imreadjpeg: isotropic resize to x%d\n", resizeHeight) ;
break ;
case kResizeAnisotropic:
mexPrintf("vl_imreadjpeg: anisotropic resize to %dx%d\n", resizeHeight, resizeWidth) ;
break ;
default:
break ;
}
}
// extract filenames as strings
std::vector<std::string> filenames ;
for (i = 0 ; i < (int)mxGetNumberOfElements(in[IN_FILENAMES]) ; ++i) {
mxArray* filename_array = mxGetCell(in[IN_FILENAMES], i) ;
if (!vlmxIsString(filename_array,-1)) {
mexErrMsgTxt("FILENAMES contains an entry that is not a string.") ;
}
char filename [4096] ;
mxGetString (filename_array, filename, sizeof(filename)/sizeof(char)) ;
filenames.push_back(std::string(filename)) ;
}
// check if the cached tasks match the new ones
bool match = true ;
  for (int t = 0 ; match && (t < (signed)filenames.size()) ; ++t) {
if (t >= (signed)tasks.size()) {
match = false ;
break ;
}
match &= (tasks[t]->name == filenames[t]) ;
}
// if there is no match, then flush tasks and start over
if (!match) {
if (verbosity > 1) {
mexPrintf("vl_imreadjpeg: flushing tasks\n") ;
}
flush_tasks() ;
tasksMutex.lock() ;
for (int t = 0 ; t < (signed)filenames.size() ; ++t) {
Task* newTask(new Task()) ;
newTask->name = filenames[t] ;
newTask->done = false ;
ImageBuffer & inputImage = newTask->inputImage ;
ImageBuffer & resizedImage = newTask->resizedImage ;
vl::ImageShape shape ;
newTask->error = readers[0].second->readShape(shape, filenames[t].c_str()) ;
if (newTask->error == vl::VLE_Success) {
vl::ImageShape resizedShape = shape ;
switch (resizeMode) {
case kResizeAnisotropic:
resizedShape.height = resizeHeight ;
resizedShape.width = resizeWidth ;
break ;
case kResizeIsotropic:
{
// note: not a bug below, resizeHeight contains the only resize param
float scale = (std::max)((float)resizeHeight / shape.width,
(float)resizeHeight / shape.height);
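          // Worked example: with 'Resize', 256 and a 640x480 (width x height)
          // input, scale = max(256/640, 256/480) = 0.533..., giving roughly
          // 341x256 -- the shorter side is scaled to 256.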
resizedShape.height = roundf(resizedShape.height * scale) ;
resizedShape.width = roundf(resizedShape.width * scale) ;
break ;
}
default:
break ;
}
newTask->requireResize = ! (resizedShape == shape) ;
if (newTask->requireResize) {
newTask->error = inputImage.init(shape, false) ;
if (newTask->error == vl::VLE_Success) {
newTask->error = resizedImage.init(resizedShape, true) ;
}
} else {
newTask->error = resizedImage.init(shape, true) ;
        // alias: the resized image will later be asked to relinquish the memory, so it *must* remain the owner
inputImage = resizedImage ;
}
} else {
strncpy(newTask->errorMessage, readers[0].second->getLastErrorMessage(), TASK_ERROR_MSG_MAX_LEN) ;
char message [1024*2] ;
int offset = snprintf(message, sizeof(message)/sizeof(char),
"could not read the header of image '%s'", newTask->name.c_str()) ;
if (strlen(newTask->errorMessage) > 0) {
snprintf(message + offset, sizeof(message)/sizeof(char) - offset,
" [%s]", newTask->errorMessage) ;
}
mexWarnMsgTxt(message) ;
}
tasks.push_back(newTask) ;
}
tasksMutex.unlock() ;
tasksCondition.notify_all() ;
}
// done if prefetching only
if (prefetch) { return ; }
// return
out[OUT_IMAGES] = mxCreateCellArray(mxGetNumberOfDimensions(in[IN_FILENAMES]),
mxGetDimensions(in[IN_FILENAMES])) ;
  for (int t = 0 ; t < (int)tasks.size() ; ++t) {
tasksMutex.lock() ;
while (!tasks[t]->done) {
completedCondition.wait(tasksMutex);
}
ImageBuffer & image = tasks[t]->resizedImage ;
tasksMutex.unlock() ;
if (tasks[t]->error == vl::VLE_Success) {
vl::ImageShape const & shape = image.getShape() ;
mwSize dimensions [3] = {
(mwSize)shape.height,
(mwSize)shape.width,
(mwSize)shape.depth} ;
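      // Create the array with zero-sized dimensions so MATLAB allocates no
      // data, then install the real dimensions and adopt our pixel buffer.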
mwSize dimensions_ [3] = {0} ;
mxArray * image_array = mxCreateNumericArray(3, dimensions_, mxSINGLE_CLASS, mxREAL) ;
mxSetDimensions(image_array, dimensions, 3) ;
mxSetData(image_array, image.relinquishMemory()) ;
mxSetCell(out[OUT_IMAGES], t, image_array) ;
} else {
strncpy(tasks[t]->errorMessage, readers[0].second->getLastErrorMessage(), TASK_ERROR_MSG_MAX_LEN) ;
char message [1024*2] ;
int offset = snprintf(message, sizeof(message)/sizeof(char),
"could not read image '%s'", tasks[t]->name.c_str()) ;
if (strlen(tasks[t]->errorMessage) > 0) {
snprintf(message + offset, sizeof(message)/sizeof(char) - offset,
" [%s]", tasks[t]->errorMessage) ;
}
mexWarnMsgTxt(message) ;
}
}
flush_tasks() ;
}
|
the_stack
|
// This file was copied from libc++'s test suite, then modified to test CUDA.
// For the most part, this consists of adding __device__ attributes and
// deleting long double.
// <cmath>
// This test requires c++11 (it's mostly decltype stuff).
#if __cplusplus >= 201103L
#include <cmath>
#include <type_traits>
#include <cassert>
#include <stdio.h>
// See PR21083
// Ambiguous is a user-defined type that defines its own overloads of cmath
// functions. When the std overloads are candidates too (by using or adl),
// they should not interfere.
struct Ambiguous : std::true_type { // ADL
__device__ operator float () { return 0.f; }
__device__ operator double () { return 0.; }
};
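// The unqualified calls below (e.g. abs(Ambiguous())) must resolve to these
// ADL overloads; each test_* function's decltype checks verify that the std::
// overloads do not interfere or make the calls ambiguous.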
__device__ Ambiguous abs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan2(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ceil(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fabs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous floor(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmod(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous frexp(Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous ldexp(Ambiguous, int){ return Ambiguous(); }
__device__ Ambiguous log(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log10(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous modf(Ambiguous, Ambiguous*){ return Ambiguous(); }
__device__ Ambiguous pow(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sqrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous signbit(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fpclassify(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isfinite(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isnormal(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreaterequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isless(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isunordered(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cbrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous copysign(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erf(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erfc(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous expm1(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fdim(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fma(Ambiguous, Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmax(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmin(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous hypot(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous hypot(Ambiguous, Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ilogb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log1p(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous logb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nearbyint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nextafter(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remainder(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remquo(Ambiguous, Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous rint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous round(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbln(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbn(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous trunc(Ambiguous){ return Ambiguous(); }
__device__ void test_abs()
{
static_assert((std::is_same<decltype(std::abs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::abs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(abs(Ambiguous())), Ambiguous>::value), "");
assert(std::abs(-1) == 1);
assert(std::abs(-1.) == 1);
assert(std::abs(-1.f) == 1);
}
__device__ void test_acos()
{
static_assert((std::is_same<decltype(std::acos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::acos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acosf(0)), float>::value), "");
static_assert((std::is_same<decltype(acos(Ambiguous())), Ambiguous>::value), "");
assert(std::acos(1) == 0);
assert(std::acos(1.) == 0);
assert(std::acos(1.f) == 0);
}
__device__ void test_asin()
{
static_assert((std::is_same<decltype(std::asin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::asin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinf(0)), float>::value), "");
static_assert((std::is_same<decltype(asin(Ambiguous())), Ambiguous>::value), "");
assert(std::asin(0) == 0);
assert(std::asin(0.) == 0);
assert(std::asin(0.f) == 0);
}
__device__ void test_atan()
{
static_assert((std::is_same<decltype(std::atan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::atan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanf(0)), float>::value), "");
static_assert((std::is_same<decltype(atan(Ambiguous())), Ambiguous>::value), "");
assert(std::atan(0) == 0);
assert(std::atan(0.) == 0);
assert(std::atan(0.f) == 0);
}
__device__ void test_atan2()
{
static_assert((std::is_same<decltype(std::atan2((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::atan2((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan2((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan2((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan2((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan2((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan2((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan2((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan2((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atan2f(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::atan2((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::atan2(0, 1) == 0);
assert(std::atan2(0, 1.) == 0);
assert(std::atan2(0, 1.f) == 0);
assert(std::atan2(0., 1) == 0);
assert(std::atan2(0., 1.) == 0);
assert(std::atan2(0., 1.f) == 0);
assert(std::atan2(0.f, 1) == 0);
assert(std::atan2(0.f, 1.) == 0);
assert(std::atan2(0.f, 1.f) == 0);
}
__device__ void test_ceil()
{
static_assert((std::is_same<decltype(std::ceil((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::ceil((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::ceil((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::ceil((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::ceil((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::ceil((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::ceil((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::ceil((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::ceil((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::ceil((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::ceilf(0)), float>::value), "");
static_assert((std::is_same<decltype(ceil(Ambiguous())), Ambiguous>::value), "");
assert(std::ceil(0) == 0);
assert(std::ceil(0.) == 0);
assert(std::ceil(0.f) == 0);
}
__device__ void test_cos()
{
static_assert((std::is_same<decltype(std::cos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::cos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cosf(0)), float>::value), "");
static_assert((std::is_same<decltype(cos(Ambiguous())), Ambiguous>::value), "");
assert(std::cos(0) == 1);
assert(std::cos(0.) == 1);
assert(std::cos(0.f) == 1);
}
__device__ void test_cosh()
{
static_assert((std::is_same<decltype(std::cosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::cosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::coshf(0)), float>::value), "");
static_assert((std::is_same<decltype(cosh(Ambiguous())), Ambiguous>::value), "");
assert(std::cosh(0) == 1);
assert(std::cosh(0.) == 1);
assert(std::cosh(0.f) == 1);
}
__device__ void test_exp()
{
static_assert((std::is_same<decltype(std::exp((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::exp((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expf(0)), float>::value), "");
static_assert((std::is_same<decltype(exp(Ambiguous())), Ambiguous>::value), "");
assert(std::exp(0) == 1);
assert(std::exp(0.) == 1);
assert(std::exp(0.f) == 1);
}
__device__ void test_fabs()
{
static_assert((std::is_same<decltype(std::fabs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::fabs((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fabs((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fabs((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fabs((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fabs((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fabs((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fabs((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fabs((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fabs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fabsf(0.0f)), float>::value), "");
static_assert((std::is_same<decltype(fabs(Ambiguous())), Ambiguous>::value), "");
assert(std::fabs(-1) == 1);
assert(std::fabs(-1.) == 1);
assert(std::fabs(-1.f) == 1);
}
__device__ void test_floor()
{
static_assert((std::is_same<decltype(std::floor((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::floor((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::floor((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::floor((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::floor((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::floor((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::floor((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::floor((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::floor((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::floor((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::floorf(0)), float>::value), "");
static_assert((std::is_same<decltype(floor(Ambiguous())), Ambiguous>::value), "");
assert(std::floor(1) == 1);
assert(std::floor(1.) == 1);
assert(std::floor(1.f) == 1);
}
__device__ void test_fmod()
{
static_assert((std::is_same<decltype(std::fmod((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::fmod((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmod((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmod((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmod((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmod((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmod((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmod((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmod((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmodf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::fmod((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmod(1.5, 1) == .5);
assert(std::fmod(1.5, 1.) == .5);
assert(std::fmod(1.5, 1.f) == .5);
assert(std::fmod(1.5f, 1) == .5);
assert(std::fmod(1.5f, 1.) == .5);
assert(std::fmod(1.5f, 1.f) == .5);
assert(std::fmod(2, 1) == 0);
assert(std::fmod(2, 1.) == 0);
assert(std::fmod(2, 1.f) == 0);
}
__device__ void test_frexp()
{
int ip;
static_assert((std::is_same<decltype(std::frexp((float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(std::frexp((bool)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::frexp((unsigned short)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::frexp((int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::frexp((unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::frexp((long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::frexp((unsigned long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::frexp((long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::frexp((unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::frexp((double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::frexpf(0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(frexp(Ambiguous(), &ip)), Ambiguous>::value), "");
assert(std::frexp(0, &ip) == 0);
assert(std::frexp(0., &ip) == 0);
assert(std::frexp(0.f, &ip) == 0);
}
__device__ void test_ldexp()
{
int ip = 1;
static_assert((std::is_same<decltype(std::ldexp((float)0, ip)), float>::value), "");
static_assert((std::is_same<decltype(std::ldexp((bool)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(std::ldexp((unsigned short)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(std::ldexp((int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(std::ldexp((unsigned int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(std::ldexp((long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(std::ldexp((unsigned long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(std::ldexp((long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(std::ldexp((unsigned long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(std::ldexp((double)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(std::ldexpf(0, ip)), float>::value), "");
static_assert((std::is_same<decltype(ldexp(Ambiguous(), ip)), Ambiguous>::value), "");
assert(std::ldexp(1, ip) == 2);
assert(std::ldexp(1., ip) == 2);
assert(std::ldexp(1.f, ip) == 2);
}
__device__ void test_log()
{
static_assert((std::is_same<decltype(std::log((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::log((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logf(0)), float>::value), "");
static_assert((std::is_same<decltype(log(Ambiguous())), Ambiguous>::value), "");
assert(std::log(1) == 0);
assert(std::log(1.) == 0);
assert(std::log(1.f) == 0);
}
__device__ void test_log10()
{
static_assert((std::is_same<decltype(std::log10((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::log10((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log10((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log10((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log10((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log10((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log10((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log10((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log10((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log10((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log10f(0)), float>::value), "");
static_assert((std::is_same<decltype(log10(Ambiguous())), Ambiguous>::value), "");
assert(std::log10(1) == 0);
assert(std::log10(1.) == 0);
assert(std::log10(1.f) == 0);
}
__device__ void test_modf()
{
static_assert((std::is_same<decltype(std::modf((float)0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(std::modf((double)0, (double*)0)), double>::value), "");
static_assert((std::is_same<decltype(std::modff(0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(modf(Ambiguous(), (Ambiguous*)0)), Ambiguous>::value), "");
double i;
assert(std::modf(1, &i) == 0);
assert(std::modf(1., &i) == 0);
assert(std::modf(1.f, &i) == 0);
}
__device__ void test_pow()
{
static_assert((std::is_same<decltype(std::pow((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::pow((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::pow((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::pow((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::pow((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::pow((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::pow((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::pow((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::pow((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::powf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::pow((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(pow(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::pow(1, 1) == 1);
assert(std::pow(1., 1) == 1);
assert(std::pow(1.f, 1) == 1);
assert(std::pow(1, 1.) == 1);
assert(std::pow(1., 1.) == 1);
assert(std::pow(1.f, 1.) == 1);
assert(std::pow(1, 1.f) == 1);
assert(std::pow(1., 1.f) == 1);
assert(std::pow(1.f, 1.f) == 1);
}
__device__ void test_sin()
{
static_assert((std::is_same<decltype(std::sin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::sin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinf(0)), float>::value), "");
static_assert((std::is_same<decltype(sin(Ambiguous())), Ambiguous>::value), "");
assert(std::sin(0) == 0);
assert(std::sin(0.) == 0);
assert(std::sin(0.f) == 0);
}
__device__ void test_sinh()
{
static_assert((std::is_same<decltype(std::sinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::sinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(sinh(Ambiguous())), Ambiguous>::value), "");
assert(std::sinh(0) == 0);
assert(std::sinh(0.) == 0);
assert(std::sinh(0.f) == 0);
}
__device__ void test_sqrt()
{
static_assert((std::is_same<decltype(std::sqrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::sqrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sqrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sqrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sqrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sqrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sqrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sqrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sqrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sqrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::sqrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(sqrt(Ambiguous())), Ambiguous>::value), "");
assert(std::sqrt(4) == 2);
assert(std::sqrt(4.) == 2);
assert(std::sqrt(4.f) == 2);
}
__device__ void test_tan()
{
static_assert((std::is_same<decltype(std::tan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::tan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanf(0)), float>::value), "");
static_assert((std::is_same<decltype(tan(Ambiguous())), Ambiguous>::value), "");
assert(std::tan(0) == 0);
assert(std::tan(0.) == 0);
assert(std::tan(0.f) == 0);
}
__device__ void test_tanh()
{
static_assert((std::is_same<decltype(std::tanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::tanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(tanh(Ambiguous())), Ambiguous>::value), "");
assert(std::tanh(0) == 0);
assert(std::tanh(0.) == 0);
assert(std::tanh(0.f) == 0);
}
__device__ void test_signbit()
{
#ifdef signbit
#error signbit defined
#endif
static_assert((std::is_same<decltype(std::signbit((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::signbit((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::signbit(0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit(Ambiguous())), Ambiguous>::value), "");
assert(std::signbit(-1) == true);
assert(std::signbit(-1.) == true);
assert(std::signbit(-1.f) == true);
}
__device__ void test_fpclassify()
{
#ifdef fpclassify
#error fpclassify defined
#endif
static_assert((std::is_same<decltype(std::fpclassify((float)0)), int>::value), "");
static_assert((std::is_same<decltype(std::fpclassify((double)0)), int>::value), "");
static_assert((std::is_same<decltype(std::fpclassify(0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify(Ambiguous())), Ambiguous>::value), "");
assert(std::fpclassify(-1) == FP_NORMAL);
assert(std::fpclassify(-1.) == FP_NORMAL);
assert(std::fpclassify(-1.f) == FP_NORMAL);
}
__device__ void test_isfinite()
{
#ifdef isfinite
#error isfinite defined
#endif
static_assert((std::is_same<decltype(std::isfinite((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isfinite((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isfinite(0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite(Ambiguous())), Ambiguous>::value), "");
assert(std::isfinite(-1) == true);
assert(std::isfinite(-1.) == true);
assert(std::isfinite(-1.f) == true);
}
__device__ void test_isnormal()
{
#ifdef isnormal
#error isnormal defined
#endif
static_assert((std::is_same<decltype(std::isnormal((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isnormal((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isnormal(0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal(Ambiguous())), Ambiguous>::value), "");
assert(std::isnormal(-1) == true);
assert(std::isnormal(-1.) == true);
assert(std::isnormal(-1.f) == true);
}
__device__ void test_isgreater()
{
#ifdef isgreater
#error isgreater defined
#endif
static_assert((std::is_same<decltype(std::isgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreater(-1, 0) == false);
assert(std::isgreater(-1, 0.) == false);
assert(std::isgreater(-1, 0.f) == false);
assert(std::isgreater(-1., 0) == false);
assert(std::isgreater(-1., 0.) == false);
assert(std::isgreater(-1., 0.f) == false);
assert(std::isgreater(-1.f, 0) == false);
assert(std::isgreater(-1.f, 0.) == false);
assert(std::isgreater(-1.f, 0.f) == false);
}
__device__ void test_isgreaterequal()
{
#ifdef isgreaterequal
#error isgreaterequal defined
#endif
static_assert((std::is_same<decltype(std::isgreaterequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isgreaterequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isgreaterequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isgreaterequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isgreaterequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreaterequal(-1, 0) == false);
assert(std::isgreaterequal(-1, 0.) == false);
assert(std::isgreaterequal(-1, 0.f) == false);
assert(std::isgreaterequal(-1., 0) == false);
assert(std::isgreaterequal(-1., 0.) == false);
assert(std::isgreaterequal(-1., 0.f) == false);
assert(std::isgreaterequal(-1.f, 0) == false);
assert(std::isgreaterequal(-1.f, 0.) == false);
assert(std::isgreaterequal(-1.f, 0.f) == false);
}
__device__ void test_isinf()
{
#ifdef isinf
#error isinf defined
#endif
static_assert((std::is_same<decltype(std::isinf((float)0)), bool>::value), "");
typedef decltype(std::isinf((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isinf(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this; a bool-returning
// wrapper sketch follows this function.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(std::isinf(0)), bool>::value), "");
assert(std::isinf(-1) == false);
assert(std::isinf(-1.) == false);
assert(std::isinf(-1.f) == false);
}
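// Hedged sketch (editor addition, hypothetical helper not in the original
// test): a thin wrapper that always yields bool regardless of whether the
// underlying isinf(double) returns bool or int, per the GLIBC note above.
__device__ bool isinf_as_bool(double x) { return std::isinf(x) != 0; }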
__device__ void test_isless()
{
#ifdef isless
#error isless defined
#endif
static_assert((std::is_same<decltype(std::isless((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isless((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isless((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isless((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isless(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isless(-1, 0) == true);
assert(std::isless(-1, 0.) == true);
assert(std::isless(-1, 0.f) == true);
assert(std::isless(-1., 0) == true);
assert(std::isless(-1., 0.) == true);
assert(std::isless(-1., 0.f) == true);
assert(std::isless(-1.f, 0) == true);
assert(std::isless(-1.f, 0.) == true);
assert(std::isless(-1.f, 0.f) == true);
}
__device__ void test_islessequal()
{
#ifdef islessequal
#error islessequal defined
#endif
static_assert((std::is_same<decltype(std::islessequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::islessequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::islessequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::islessequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::islessequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessequal(-1, 0) == true);
assert(std::islessequal(-1, 0.) == true);
assert(std::islessequal(-1, 0.f) == true);
assert(std::islessequal(-1., 0) == true);
assert(std::islessequal(-1., 0.) == true);
assert(std::islessequal(-1., 0.f) == true);
assert(std::islessequal(-1.f, 0) == true);
assert(std::islessequal(-1.f, 0.) == true);
assert(std::islessequal(-1.f, 0.f) == true);
}
__device__ void test_islessgreater()
{
#ifdef islessgreater
#error islessgreater defined
#endif
static_assert((std::is_same<decltype(std::islessgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::islessgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::islessgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::islessgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::islessgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessgreater(-1, 0) == true);
assert(std::islessgreater(-1, 0.) == true);
assert(std::islessgreater(-1, 0.f) == true);
assert(std::islessgreater(-1., 0) == true);
assert(std::islessgreater(-1., 0.) == true);
assert(std::islessgreater(-1., 0.f) == true);
assert(std::islessgreater(-1.f, 0) == true);
assert(std::islessgreater(-1.f, 0.) == true);
assert(std::islessgreater(-1.f, 0.f) == true);
}
__device__ void test_isnan()
{
#ifdef isnan
#error isnan defined
#endif
static_assert((std::is_same<decltype(std::isnan((float)0)), bool>::value), "");
typedef decltype(std::isnan((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isnan(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(std::isnan(0)), bool>::value), "");
assert(std::isnan(-1) == false);
assert(std::isnan(-1.) == false);
assert(std::isnan(-1.f) == false);
}
__device__ void test_isunordered()
{
#ifdef isunordered
#error isunordered defined
#endif
static_assert((std::is_same<decltype(std::isunordered((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isunordered((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isunordered((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isunordered((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(std::isunordered(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isunordered(-1, 0) == false);
assert(std::isunordered(-1, 0.) == false);
assert(std::isunordered(-1, 0.f) == false);
assert(std::isunordered(-1., 0) == false);
assert(std::isunordered(-1., 0.) == false);
assert(std::isunordered(-1., 0.f) == false);
assert(std::isunordered(-1.f, 0) == false);
assert(std::isunordered(-1.f, 0.) == false);
assert(std::isunordered(-1.f, 0.f) == false);
}
__device__ void test_acosh()
{
static_assert((std::is_same<decltype(std::acosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::acosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::acoshf(0)), float>::value), "");
static_assert((std::is_same<decltype(acosh(Ambiguous())), Ambiguous>::value), "");
assert(std::acosh(1) == 0);
assert(std::acosh(1.) == 0);
assert(std::acosh(1.f) == 0);
}
__device__ void test_asinh()
{
static_assert((std::is_same<decltype(std::asinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::asinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::asinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(asinh(Ambiguous())), Ambiguous>::value), "");
assert(std::asinh(0) == 0);
assert(std::asinh(0.) == 0);
assert(std::asinh(0.f) == 0);
}
__device__ void test_atanh()
{
static_assert((std::is_same<decltype(std::atanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::atanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::atanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(atanh(Ambiguous())), Ambiguous>::value), "");
assert(std::atanh(0) == 0);
assert(std::atanh(0.) == 0);
assert(std::atanh(0.f) == 0);
}
__device__ void test_cbrt()
{
static_assert((std::is_same<decltype(std::cbrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::cbrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cbrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cbrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cbrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cbrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cbrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cbrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cbrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cbrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::cbrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(cbrt(Ambiguous())), Ambiguous>::value), "");
assert(std::cbrt(1) == 1);
assert(std::cbrt(1.) == 1);
assert(std::cbrt(1.f) == 1);
}
__device__ void test_copysign()
{
static_assert((std::is_same<decltype(std::copysign((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::copysign((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::copysign((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::copysign((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::copysign((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::copysign((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::copysign((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::copysign((double)0, (double)0)), double>::value), "");
// CUDA's copysign(float, double) returns a float. This is not per spec,
// but it's kind of reasonable -- given that copysign(x, y) just gives x's
// magnitude with y's sign, there's no reason that the float first argument
// should have to be promoted to double.
//static_assert((std::is_same<decltype(std::copysign((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::copysignf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::copysign((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::copysign(1, 1) == 1);
assert(std::copysign(1., 1) == 1);
assert(std::copysign(1.f, 1) == 1);
assert(std::copysign(1, 1.) == 1);
assert(std::copysign(1., 1.) == 1);
assert(std::copysign(1.f, 1.) == 1);
assert(std::copysign(1, 1.f) == 1);
assert(std::copysign(1., 1.f) == 1);
assert(std::copysign(1.f, 1.f) == 1);
}
__device__ void test_erf()
{
static_assert((std::is_same<decltype(std::erf((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::erf((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erf((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erf((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erf((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erf((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erf((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erf((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erf((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erf((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erff(0)), float>::value), "");
static_assert((std::is_same<decltype(erf(Ambiguous())), Ambiguous>::value), "");
assert(std::erf(0) == 0);
assert(std::erf(0.) == 0);
assert(std::erf(0.f) == 0);
}
__device__ void test_erfc()
{
static_assert((std::is_same<decltype(std::erfc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::erfc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erfc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erfc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erfc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erfc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erfc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erfc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erfc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erfc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::erfcf(0)), float>::value), "");
static_assert((std::is_same<decltype(erfc(Ambiguous())), Ambiguous>::value), "");
assert(std::erfc(0) == 1);
assert(std::erfc(0.) == 1);
assert(std::erfc(0.f) == 1);
}
__device__ void test_exp2()
{
static_assert((std::is_same<decltype(std::exp2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::exp2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::exp2f(0)), float>::value), "");
static_assert((std::is_same<decltype(exp2(Ambiguous())), Ambiguous>::value), "");
assert(std::exp2(1) == 2);
assert(std::exp2(1.) == 2);
assert(std::exp2(1.f) == 2);
}
__device__ void test_expm1()
{
static_assert((std::is_same<decltype(std::expm1((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::expm1((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expm1((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expm1((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expm1((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expm1((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expm1((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expm1((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expm1((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expm1((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::expm1f(0)), float>::value), "");
static_assert((std::is_same<decltype(expm1(Ambiguous())), Ambiguous>::value), "");
assert(std::expm1(0) == 0);
assert(std::expm1(0.) == 0);
assert(std::expm1(0.f) == 0);
}
__device__ void test_fdim()
{
static_assert((std::is_same<decltype(std::fdim((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::fdim((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fdim((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fdim((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fdim((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fdim((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fdim((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fdim((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fdim((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fdimf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::fdim((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fdim(1, 0) == 1);
assert(std::fdim(1., 0) == 1);
assert(std::fdim(1.f, 0) == 1);
assert(std::fdim(1, 0.) == 1);
assert(std::fdim(1., 0.) == 1);
assert(std::fdim(1.f, 0.) == 1);
assert(std::fdim(1, 0.f) == 1);
assert(std::fdim(1., 0.f) == 1);
assert(std::fdim(1.f, 0.f) == 1);
}
__device__ void test_fma()
{
static_assert((std::is_same<decltype(std::fma((bool)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((char)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((unsigned)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((float)0, (int)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((float)0, (long)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((float)0, (float)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((float)0, (float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((float)0, (float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::fma((bool)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((char)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((unsigned)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((double)0, (int)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((double)0, (long)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((double)0, (double)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((double)0, (double)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fma((double)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmaf(0,0,0)), float>::value), "");
static_assert((std::is_same<decltype(fma(Ambiguous(), Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fma(1, 1, 1) == 2);
assert(std::fma(1., 1, 1) == 2);
assert(std::fma(1.f, 1, 1) == 2);
assert(std::fma(1, 1., 1) == 2);
assert(std::fma(1., 1., 1) == 2);
assert(std::fma(1.f, 1., 1) == 2);
assert(std::fma(1, 1.f, 1) == 2);
assert(std::fma(1., 1.f, 1) == 2);
assert(std::fma(1.f, 1.f, 1) == 2);
assert(std::fma(1, 1, 1.) == 2);
assert(std::fma(1., 1, 1.) == 2);
assert(std::fma(1.f, 1, 1.) == 2);
assert(std::fma(1, 1., 1.) == 2);
assert(std::fma(1., 1., 1.) == 2);
assert(std::fma(1.f, 1., 1.) == 2);
assert(std::fma(1, 1.f, 1.) == 2);
assert(std::fma(1., 1.f, 1.) == 2);
assert(std::fma(1.f, 1.f, 1.) == 2);
assert(std::fma(1, 1, 1.f) == 2);
assert(std::fma(1., 1, 1.f) == 2);
assert(std::fma(1.f, 1, 1.f) == 2);
assert(std::fma(1, 1., 1.f) == 2);
assert(std::fma(1., 1., 1.f) == 2);
assert(std::fma(1.f, 1., 1.f) == 2);
assert(std::fma(1, 1.f, 1.f) == 2);
assert(std::fma(1., 1.f, 1.f) == 2);
assert(std::fma(1.f, 1.f, 1.f) == 2);
}
__device__ void test_fmax()
{
static_assert((std::is_same<decltype(std::fmax((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::fmax((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmax((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmax((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmax((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmax((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmax((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmax((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmax((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmaxf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::fmax((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmax(1, 0) == 1);
assert(std::fmax(1., 0) == 1);
assert(std::fmax(1.f, 0) == 1);
assert(std::fmax(1, 0.) == 1);
assert(std::fmax(1., 0.) == 1);
assert(std::fmax(1.f, 0.) == 1);
assert(std::fmax(1, 0.f) == 1);
assert(std::fmax(1., 0.f) == 1);
assert(std::fmax(1.f, 0.f) == 1);
}
__device__ void test_fmin()
{
static_assert((std::is_same<decltype(std::fmin((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::fmin((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmin((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmin((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmin((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmin((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmin((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmin((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fmin((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::fminf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::fmin((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmin(1, 0) == 0);
assert(std::fmin(1., 0) == 0);
assert(std::fmin(1.f, 0) == 0);
assert(std::fmin(1, 0.) == 0);
assert(std::fmin(1., 0.) == 0);
assert(std::fmin(1.f, 0.) == 0);
assert(std::fmin(1, 0.f) == 0);
assert(std::fmin(1., 0.f) == 0);
assert(std::fmin(1.f, 0.f) == 0);
}
__device__ void test_hypot()
{
static_assert((std::is_same<decltype(std::hypot((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::hypot((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypotf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::hypot((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::hypot(3, 4) == 5);
assert(std::hypot(3, 4.) == 5);
assert(std::hypot(3, 4.f) == 5);
assert(std::hypot(3., 4) == 5);
assert(std::hypot(3., 4.) == 5);
assert(std::hypot(3., 4.f) == 5);
assert(std::hypot(3.f, 4) == 5);
assert(std::hypot(3.f, 4.) == 5);
assert(std::hypot(3.f, 4.f) == 5);
// CUDA does not provide a 3-argument hypot(); a composed two-argument
// sketch follows this function.
#if 0 // __cplusplus >= 201703L && STDLIB_VERSION >= 2017
static_assert((std::is_same<decltype(std::hypot((float)0, (float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((float)0, (float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::hypot((int)0, (int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot(Ambiguous(), Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::hypot(2, 3, 6) == 7);
assert(std::hypot(1, 4, 8) == 9);
#endif
}
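// Hedged sketch (editor addition, hypothetical helper not in the original
// test): CUDA lacks the C++17 three-argument std::hypot, but an equivalent
// value can be composed from the two-argument overload;
// hypot(hypot(x, y), z) equals sqrt(x*x + y*y + z*z) mathematically, though
// rounding of the intermediate result may differ slightly.
// e.g. hypot3_sketch(2., 3., 6.) is 7 up to rounding.
__device__ double hypot3_sketch(double x, double y, double z)
{
return std::hypot(std::hypot(x, y), z);
}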
__device__ void test_ilogb()
{
static_assert((std::is_same<decltype(std::ilogb((float)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogb((bool)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogb((unsigned short)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogb((int)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogb((unsigned int)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogb((long)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogb((unsigned long)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogb((long long)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogb((unsigned long long)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogb((double)0)), int>::value), "");
static_assert((std::is_same<decltype(std::ilogbf(0)), int>::value), "");
// No CUDA ilogbl (takes a long double).
//static_assert((std::is_same<decltype(std::ilogbl(0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb(Ambiguous())), Ambiguous>::value), "");
assert(std::ilogb(1) == 0);
assert(std::ilogb(1.) == 0);
assert(std::ilogb(1.f) == 0);
}
__device__ void test_lgamma()
{
static_assert((std::is_same<decltype(std::lgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::lgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::lgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::lgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::lgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::lgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::lgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::lgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::lgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::lgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::lgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(lgamma(Ambiguous())), Ambiguous>::value), "");
assert(std::lgamma(1) == 0);
}
__device__ void test_llrint()
{
static_assert((std::is_same<decltype(std::llrint((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrint((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrint((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrint((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrint((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrint((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrint((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrint((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrint((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrint((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llrintf(0)), long long>::value), "");
// No CUDA llrintl (takes a long double).
//static_assert((std::is_same<decltype(std::llrintl(0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint(Ambiguous())), Ambiguous>::value), "");
assert(std::llrint(1) == 1LL);
assert(std::llrint(1.) == 1LL);
#if CUDA_VERSION > 7050
assert(std::llrint(1.f) == 1LL);
#endif
}
__device__ void test_llround()
{
static_assert((std::is_same<decltype(std::llround((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llround((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llround((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llround((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llround((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llround((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llround((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llround((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llround((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llround((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(std::llroundf(0)), long long>::value), "");
// No CUDA llroundl.
//static_assert((std::is_same<decltype(std::llroundl(0)), long long>::value), "");
static_assert((std::is_same<decltype(llround(Ambiguous())), Ambiguous>::value), "");
assert(std::llround(1) == 1LL);
assert(std::llround(1.) == 1LL);
assert(std::llround(1.f) == 1LL);
}
__device__ void test_log1p()
{
static_assert((std::is_same<decltype(std::log1p((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::log1p((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log1p((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log1p((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log1p((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log1p((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log1p((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log1p((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log1p((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log1p((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log1pf(0)), float>::value), "");
static_assert((std::is_same<decltype(log1p(Ambiguous())), Ambiguous>::value), "");
assert(std::log1p(0) == 0);
assert(std::log1p(0.) == 0);
assert(std::log1p(0.f) == 0);
}
__device__ void test_log2()
{
static_assert((std::is_same<decltype(std::log2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::log2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::log2f(0)), float>::value), "");
static_assert((std::is_same<decltype(log2(Ambiguous())), Ambiguous>::value), "");
assert(std::log2(1) == 0);
assert(std::log2(1.) == 0);
assert(std::log2(1.f) == 0);
}
__device__ void test_logb()
{
static_assert((std::is_same<decltype(std::logb((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::logb((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logb((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logb((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logb((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logb((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logb((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logb((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logb((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logb((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::logbf(0)), float>::value), "");
static_assert((std::is_same<decltype(logb(Ambiguous())), Ambiguous>::value), "");
assert(std::logb(1) == 0);
assert(std::logb(1.) == 0);
assert(std::logb(1.f) == 0);
}
__device__ void test_lrint()
{
static_assert((std::is_same<decltype(std::lrint((float)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrint((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrint((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrint((int)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrint((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrint((long)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrint((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrint((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrint((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrint((double)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lrintf(0)), long>::value), "");
// No CUDA lrintl (takes a long double).
//static_assert((std::is_same<decltype(std::lrintl(0)), long>::value), "");
static_assert((std::is_same<decltype(lrint(Ambiguous())), Ambiguous>::value), "");
assert(std::lrint(1) == 1L);
assert(std::lrint(1.) == 1L);
#if CUDA_VERSION > 7050
assert(std::lrint(1.f) == 1L);
#endif
}
__device__ void test_lround()
{
static_assert((std::is_same<decltype(std::lround((float)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lround((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lround((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lround((int)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lround((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lround((long)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lround((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lround((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lround((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lround((double)0)), long>::value), "");
static_assert((std::is_same<decltype(std::lroundf(0)), long>::value), "");
// No CUDA lroundl (takes a long double).
//static_assert((std::is_same<decltype(std::lroundl(0)), long>::value), "");
static_assert((std::is_same<decltype(lround(Ambiguous())), Ambiguous>::value), "");
assert(std::lround(1) == 1L);
assert(std::lround(1.) == 1L);
assert(std::lround(1.f) == 1L);
}
__device__ void test_nan()
{
static_assert((std::is_same<decltype(std::nan("")), double>::value), "");
static_assert((std::is_same<decltype(std::nanf("")), float>::value), "");
}
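// Hedged addition (editor sketch, not part of the original libc++-derived
// test): a minimal runtime sanity check that the values produced by
// std::nan/std::nanf are in fact NaN, using the std::isnan overloads
// exercised above.
__device__ void test_nan_runtime_sketch()
{
assert(std::isnan(std::nan("")));
assert(std::isnan(std::nanf("")));
}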
__device__ void test_nearbyint()
{
static_assert((std::is_same<decltype(std::nearbyint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::nearbyint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nearbyint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nearbyint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nearbyint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nearbyint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nearbyint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nearbyint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nearbyint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nearbyint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nearbyintf(0)), float>::value), "");
static_assert((std::is_same<decltype(nearbyint(Ambiguous())), Ambiguous>::value), "");
assert(std::nearbyint(1) == 1);
assert(std::nearbyint(1.) == 1);
assert(std::nearbyint(1.f) == 1);
}
__device__ void test_nextafter()
{
static_assert((std::is_same<decltype(std::nextafter((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::nextafter((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nextafter((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nextafter((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nextafter((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nextafter((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nextafter((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nextafter((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nextafter((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::nextafterf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::nextafter((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
// Invoke all our overloads. Even though we don't check the exact result
// (this is pretty annoying to do for this function), we make sure to *use*
// the results so that these function calls can't be DCE'ed.
assert(std::nextafter(0, 1) != 0);
assert(std::nextafter(0, 1.) != 0);
assert(std::nextafter(0, 1.f) != 0);
assert(std::nextafter(0., 1) != 0);
assert(std::nextafter(0., 1.) != 0);
assert(std::nextafter(0., 1.f) != 0);
assert(std::nextafter(0.f, 1) != 0);
assert(std::nextafter(0.f, 1.) != 0);
assert(std::nextafter(0.f, 1.f) != 0);
}
__device__ void test_remainder()
{
static_assert((std::is_same<decltype(std::remainder((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::remainder((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(std::remainder((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::remainder((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::remainder((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::remainder((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::remainder((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::remainder((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::remainder((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::remainderf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(std::remainder((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::remainder(1.5, 1) == -.5);
assert(std::remainder(1.5, 1.) == -.5);
assert(std::remainder(1.5, 1.f) == -.5);
assert(std::remainder(1.5f, 1) == -.5);
assert(std::remainder(1.5f, 1.) == -.5);
assert(std::remainder(1.5f, 1.f) == -.5);
assert(std::remainder(2, 1) == 0);
assert(std::remainder(2, 1.) == 0);
assert(std::remainder(2, 1.f) == 0);
}
__device__ void test_remquo()
{
int ip;
static_assert((std::is_same<decltype(std::remquo((float)0, (float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(std::remquo((bool)0, (float)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::remquo((unsigned short)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::remquo((float)0, (unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::remquo((double)0, (long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::remquo((int)0, (long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::remquo((int)0, (unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::remquo((double)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::remquo((float)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(std::remquof(0,0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(std::remquo((int)0, (int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo(Ambiguous(), Ambiguous(), &ip)), Ambiguous>::value), "");
assert(std::remquo(1, 1, &ip) == 0);
assert(std::remquo(1, 1., &ip) == 0);
assert(std::remquo(1, 1.f, &ip) == 0);
assert(std::remquo(0.5, 1, &ip) == 0.5);
assert(std::remquo(0.5, 1., &ip) == 0.5);
assert(std::remquo(0.5, 1.f, &ip) == 0.5);
assert(std::remquo(0.5f, 1, &ip) == 0.5);
assert(std::remquo(0.5f, 1., &ip) == 0.5);
assert(std::remquo(0.5f, 1.f, &ip) == 0.5);
}
__device__ void test_rint()
{
static_assert((std::is_same<decltype(std::rint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::rint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::rint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::rint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::rint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::rint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::rint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::rint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::rint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::rint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::rintf(0)), float>::value), "");
static_assert((std::is_same<decltype(rint(Ambiguous())), Ambiguous>::value), "");
assert(std::rint(1) == 1);
assert(std::rint(1.) == 1);
assert(std::rint(1.f) == 1);
}
__device__ void test_round()
{
static_assert((std::is_same<decltype(std::round((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::round((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::round((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::round((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::round((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::round((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::round((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::round((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::round((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::round((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::roundf(0)), float>::value), "");
static_assert((std::is_same<decltype(round(Ambiguous())), Ambiguous>::value), "");
assert(std::round(1) == 1);
assert(std::round(1.) == 1);
assert(std::round(1.f) == 1);
}
__device__ void test_scalbln()
{
static_assert((std::is_same<decltype(std::scalbln((float)0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(std::scalbln((bool)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbln((unsigned short)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbln((int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbln((unsigned int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbln((long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbln((unsigned long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbln((long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbln((unsigned long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbln((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalblnf(0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbln(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbln(1, 1) == 2);
assert(std::scalbln(1, 1.) == 2);
assert(std::scalbln(1, 1.f) == 2);
assert(std::scalbln(1., 1) == 2);
assert(std::scalbln(1., 1.) == 2);
assert(std::scalbln(1., 1.f) == 2);
assert(std::scalbln(1.f, 1) == 2);
assert(std::scalbln(1.f, 1.) == 2);
assert(std::scalbln(1.f, 1.f) == 2);
}
__device__ void test_scalbn()
{
static_assert((std::is_same<decltype(std::scalbn((float)0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(std::scalbn((bool)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbn((unsigned short)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbn((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbn((unsigned int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbn((long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbn((unsigned long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbn((long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbn((unsigned long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbn((double)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::scalbnf(0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbn(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbn(1, 1) == 2);
assert(std::scalbn(1, 1.) == 2);
assert(std::scalbn(1, 1.f) == 2);
assert(std::scalbn(1., 1) == 2);
assert(std::scalbn(1., 1.) == 2);
assert(std::scalbn(1., 1.f) == 2);
assert(std::scalbn(1.f, 1) == 2);
assert(std::scalbn(1.f, 1.) == 2);
assert(std::scalbn(1.f, 1.f) == 2);
}
__device__ void test_tgamma()
{
static_assert((std::is_same<decltype(std::tgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::tgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::tgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(tgamma(Ambiguous())), Ambiguous>::value), "");
assert(std::tgamma(1) == 1);
assert(std::tgamma(1.) == 1);
assert(std::tgamma(1.f) == 1);
}
__device__ void test_trunc()
{
static_assert((std::is_same<decltype(std::trunc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(std::trunc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(std::trunc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(std::trunc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::trunc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(std::trunc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::trunc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::trunc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::trunc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(std::trunc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(std::truncf(0)), float>::value), "");
static_assert((std::is_same<decltype(trunc(Ambiguous())), Ambiguous>::value), "");
assert(std::trunc(1) == 1);
assert(std::trunc(1.) == 1);
assert(std::trunc(1.f) == 1);
}
__global__ void tests()
{
test_abs();
test_acos();
test_asin();
test_atan();
test_atan2();
test_ceil();
test_cos();
test_cosh();
test_exp();
test_fabs();
test_floor();
test_fmod();
test_frexp();
test_ldexp();
test_log();
test_log10();
test_modf();
test_pow();
test_sin();
test_sinh();
test_sqrt();
test_tan();
test_tanh();
test_signbit();
test_fpclassify();
test_isfinite();
test_isnormal();
test_isgreater();
test_isgreaterequal();
test_isinf();
test_isless();
test_islessequal();
test_islessgreater();
test_isnan();
test_isunordered();
test_acosh();
test_asinh();
test_atanh();
test_cbrt();
test_copysign();
test_erf();
test_erfc();
test_exp2();
test_expm1();
test_fdim();
test_fma();
test_fmax();
test_fmin();
test_hypot();
test_ilogb();
test_lgamma();
test_llrint();
test_llround();
test_log1p();
test_log2();
test_logb();
test_lrint();
test_lround();
test_nan();
test_nearbyint();
test_nextafter();
test_remainder();
test_remquo();
test_rint();
test_round();
test_scalbln();
test_scalbn();
test_tgamma();
test_trunc();
}
int main() {
tests<<<1,1>>>();
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
printf("CUDA error %d\n", (int)err);
return 1;
}
printf("Success!\n");
return 0;
}
#else
#include <stdio.h>
// No C++11; test is a nop.
int main() {
printf("Success!\n");
return 0;
}
#endif // __cplusplus < 201103L
#include "cuda_utils.h"
#include <vector>
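// chamfer_dist_kernel: for every point of xyz1, finds the squared distance to
// its nearest neighbour in xyz2 and that neighbour's index. xyz2 is streamed
// through shared memory in tiles of `batch` points and the inner loop is
// unrolled by four; results go to dist[b][n] and indexes[b][n]. Note that the
// local `dist` temporaries inside the unrolled blocks shadow the output
// pointer parameter of the same name.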
template <typename scalar_t>
__global__ void chamfer_dist_kernel(int batch_size, int n, const scalar_t* __restrict__ xyz1, int m,
const scalar_t* __restrict__ xyz2, scalar_t* __restrict__ dist,
int* indexes)
{
const int batch = 512;
__shared__ scalar_t buf[batch * 3];
for (int i = blockIdx.x; i < batch_size; i += gridDim.x)
{
for (int k2 = 0; k2 < m; k2 += batch)
{
int end_k = min(m, k2 + batch) - k2;
for (int j = threadIdx.x; j < end_k * 3; j += blockDim.x)
{
buf[j] = xyz2[(i * m + k2) * 3 + j];
}
__syncthreads();
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y)
{
scalar_t x1 = xyz1[(i * n + j) * 3 + 0];
scalar_t y1 = xyz1[(i * n + j) * 3 + 1];
scalar_t z1 = xyz1[(i * n + j) * 3 + 2];
scalar_t best_dist = 0;
int best_dist_index = 0;
int end_ka = end_k - (end_k & 3);
if (end_ka == batch)
{
for (int k = 0; k < batch; k += 4)
{
{
scalar_t x2 = buf[k * 3 + 0] - x1;
scalar_t y2 = buf[k * 3 + 1] - y1;
scalar_t z2 = buf[k * 3 + 2] - z1;
scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2;
if (k == 0 || dist < best_dist)
{
best_dist = dist;
best_dist_index = k + k2;
}
}
{
scalar_t x2 = buf[k * 3 + 3] - x1;
scalar_t y2 = buf[k * 3 + 4] - y1;
scalar_t z2 = buf[k * 3 + 5] - z1;
scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2;
if (dist < best_dist)
{
best_dist = dist;
best_dist_index = k + k2 + 1;
}
}
{
scalar_t x2 = buf[k * 3 + 6] - x1;
scalar_t y2 = buf[k * 3 + 7] - y1;
scalar_t z2 = buf[k * 3 + 8] - z1;
scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2;
if (dist < best_dist)
{
best_dist = dist;
best_dist_index = k + k2 + 2;
}
}
{
scalar_t x2 = buf[k * 3 + 9] - x1;
scalar_t y2 = buf[k * 3 + 10] - y1;
scalar_t z2 = buf[k * 3 + 11] - z1;
scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2;
if (dist < best_dist)
{
best_dist = dist;
best_dist_index = k + k2 + 3;
}
}
}
}
else
{
for (int k = 0; k < end_ka; k += 4)
{
{
scalar_t x2 = buf[k * 3 + 0] - x1;
scalar_t y2 = buf[k * 3 + 1] - y1;
scalar_t z2 = buf[k * 3 + 2] - z1;
scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2;
if (k == 0 || dist < best_dist)
{
best_dist = dist;
best_dist_index = k + k2;
}
}
{
scalar_t x2 = buf[k * 3 + 3] - x1;
scalar_t y2 = buf[k * 3 + 4] - y1;
scalar_t z2 = buf[k * 3 + 5] - z1;
scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2;
if (dist < best_dist)
{
best_dist = dist;
best_dist_index = k + k2 + 1;
}
}
{
scalar_t x2 = buf[k * 3 + 6] - x1;
scalar_t y2 = buf[k * 3 + 7] - y1;
scalar_t z2 = buf[k * 3 + 8] - z1;
scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2;
if (dist < best_dist)
{
best_dist = dist;
best_dist_index = k + k2 + 2;
}
}
{
scalar_t x2 = buf[k * 3 + 9] - x1;
scalar_t y2 = buf[k * 3 + 10] - y1;
scalar_t z2 = buf[k * 3 + 11] - z1;
scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2;
if (dist < best_dist)
{
best_dist = dist;
best_dist_index = k + k2 + 3;
}
}
}
}
for (int k = end_ka; k < end_k; k++)
{
scalar_t x2 = buf[k * 3 + 0] - x1;
scalar_t y2 = buf[k * 3 + 1] - y1;
scalar_t z2 = buf[k * 3 + 2] - z1;
scalar_t dist = x2 * x2 + y2 * y2 + z2 * z2;
if (k == 0 || dist < best_dist)
{
best_dist = dist;
best_dist_index = k + k2;
}
}
if (k2 == 0 || dist[(i * n + j)] > best_dist)
{
dist[(i * n + j)] = best_dist;
indexes[(i * n + j)] = best_dist_index;
}
}
__syncthreads();
}
}
}
std::vector<torch::Tensor> chamfer_dist_kernel_wrapper(torch::Tensor xyz1, torch::Tensor xyz2)
{
const int batch_size = xyz1.size(0);
const int n = xyz1.size(1); // num_points point cloud A
const int m = xyz2.size(1); // num_points point cloud B
torch::Tensor dist1 = torch::zeros({batch_size, n}, torch::CUDA(xyz1.scalar_type()));
torch::Tensor dist2 = torch::zeros({batch_size, m}, torch::CUDA(xyz1.scalar_type()));
torch::Tensor idx1 = torch::zeros({batch_size, n}, torch::CUDA(torch::kInt));
torch::Tensor idx2 = torch::zeros({batch_size, m}, torch::CUDA(torch::kInt));
AT_DISPATCH_FLOATING_TYPES(
xyz1.scalar_type(), "chamfer_dist_cuda", ([&] {
chamfer_dist_kernel<scalar_t><<<dim3(32, 16, 1), 512>>>(
batch_size, n, xyz1.data_ptr<scalar_t>(), m, xyz2.data_ptr<scalar_t>(),
dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>());
chamfer_dist_kernel<scalar_t><<<dim3(32, 16, 1), 512>>>(
batch_size, m, xyz2.data_ptr<scalar_t>(), n, xyz1.data_ptr<scalar_t>(),
dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>());
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Error in chamfer_dist_kernel_wrapper: %s\n", cudaGetErrorString(err));
}
return {dist1, dist2, idx1, idx2};
}
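// Backward pass of the chamfer distance: for each point j of xyz1 matched to
// j2 = idx1[j], the gradient of the squared distance is
// 2 * grad_dist1[j] * (xyz1[j] - xyz2[j2]); the opposite contribution is
// accumulated into grad_xyz2[j2] with atomics, since several points may share
// the same nearest neighbour.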
template <typename scalar_t>
__global__ void chamfer_dist_grad_kernel(int b, int n, const scalar_t* __restrict__ xyz1, int m,
const scalar_t* __restrict__ xyz2,
const scalar_t* __restrict__ grad_dist1, const int* idx1,
scalar_t* __restrict__ grad_xyz1,
scalar_t* __restrict__ grad_xyz2)
{
for (int i = blockIdx.x; i < b; i += gridDim.x)
{
for (int j = threadIdx.x + blockIdx.y * blockDim.x; j < n; j += blockDim.x * gridDim.y)
{
scalar_t x1 = xyz1[(i * n + j) * 3 + 0];
scalar_t y1 = xyz1[(i * n + j) * 3 + 1];
scalar_t z1 = xyz1[(i * n + j) * 3 + 2];
int j2 = idx1[i * n + j];
scalar_t x2 = xyz2[(i * m + j2) * 3 + 0];
scalar_t y2 = xyz2[(i * m + j2) * 3 + 1];
scalar_t z2 = xyz2[(i * m + j2) * 3 + 2];
scalar_t g = grad_dist1[i * n + j] * 2;
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 0]), g * (x1 - x2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 1]), g * (y1 - y2));
atomicAdd(&(grad_xyz1[(i * n + j) * 3 + 2]), g * (z1 - z2));
atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 0]), -(g * (x1 - x2)));
atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 1]), -(g * (y1 - y2)));
atomicAdd(&(grad_xyz2[(i * m + j2) * 3 + 2]), -(g * (z1 - z2)));
}
}
}
std::vector<torch::Tensor> chamfer_dist_grad_kernel_wrapper(torch::Tensor xyz1, torch::Tensor xyz2,
torch::Tensor idx1, torch::Tensor idx2,
torch::Tensor grad_dist1,
torch::Tensor grad_dist2)
{
const int batch_size = xyz1.size(0);
const int n = xyz1.size(1); // num_points point cloud A
const int m = xyz2.size(1); // num_points point cloud B
torch::Tensor grad_xyz1 = torch::zeros_like(xyz1);
torch::Tensor grad_xyz2 = torch::zeros_like(xyz2);
AT_DISPATCH_FLOATING_TYPES(
xyz1.scalar_type(), "chamfer_dist_grad_cuda", ([&] {
chamfer_dist_grad_kernel<scalar_t><<<dim3(1, 16, 1), 256>>>(
batch_size, n, xyz1.data_ptr<scalar_t>(), m, xyz2.data_ptr<scalar_t>(),
grad_dist1.data_ptr<scalar_t>(), idx1.data_ptr<int>(),
grad_xyz1.data_ptr<scalar_t>(), grad_xyz2.data_ptr<scalar_t>());
chamfer_dist_grad_kernel<scalar_t><<<dim3(1, 16, 1), 256>>>(
batch_size, m, xyz2.data_ptr<scalar_t>(), n, xyz1.data_ptr<scalar_t>(),
grad_dist2.data_ptr<scalar_t>(), idx2.data_ptr<int>(),
grad_xyz2.data_ptr<scalar_t>(), grad_xyz1.data_ptr<scalar_t>());
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("Error in chamfer_dist_grad_kernel_wrapper: %s\n", cudaGetErrorString(err));
}
return {grad_xyz1, grad_xyz2};
}
#include <cudf/column/column_view.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/traits.hpp>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/optional.h>
#include <thrust/pair.h>
namespace cudf {
namespace detail {
/**
* @brief The base class for the input or output index normalizing iterator.
*
* This implementation uses CRTP to define the `input_indexalator` and the
* `output_indexalator` classes. This is so this class can manipulate the
* uniquely typed subclass member variable `p_` directly without requiring
* virtual functions since iterator instances will be copied to device memory.
*
* The base class mainly manages updating the `p_` member variable while the
* subclasses handle accessing individual elements in device memory.
*
* @tparam T The derived class type for the iterator.
*/
template <class T>
struct base_indexalator {
using difference_type = ptrdiff_t;
using value_type = size_type;
using pointer = size_type*;
using iterator_category = std::random_access_iterator_tag;
base_indexalator() = default;
base_indexalator(base_indexalator const&) = default;
base_indexalator(base_indexalator&&) = default;
base_indexalator& operator=(base_indexalator const&) = default;
base_indexalator& operator=(base_indexalator&&) = default;
/**
* @brief Prefix increment operator.
*/
CUDF_HOST_DEVICE inline T& operator++()
{
T& derived = static_cast<T&>(*this);
derived.p_ += width_;
return derived;
}
/**
* @brief Postfix increment operator.
*/
CUDF_HOST_DEVICE inline T operator++(int)
{
T tmp{static_cast<T&>(*this)};
operator++();
return tmp;
}
/**
* @brief Prefix decrement operator.
*/
CUDF_HOST_DEVICE inline T& operator--()
{
T& derived = static_cast<T&>(*this);
derived.p_ -= width_;
return derived;
}
/**
* @brief Postfix decrement operator.
*/
CUDF_HOST_DEVICE inline T operator--(int)
{
T tmp{static_cast<T&>(*this)};
operator--();
return tmp;
}
/**
* @brief Compound assignment by sum operator.
*/
CUDF_HOST_DEVICE inline T& operator+=(difference_type offset)
{
T& derived = static_cast<T&>(*this);
derived.p_ += offset * width_;
return derived;
}
/**
* @brief Increment by offset operator.
*/
CUDF_HOST_DEVICE inline T operator+(difference_type offset) const
{
auto tmp = T{static_cast<T const&>(*this)};
tmp.p_ += (offset * width_);
return tmp;
}
/**
* @brief Addition operator with the offset on the left-hand side.
*/
CUDF_HOST_DEVICE inline friend T operator+(difference_type offset, T const& rhs)
{
T tmp{rhs};
tmp.p_ += (offset * rhs.width_);
return tmp;
}
/**
* @brief Compound assignment by difference operator.
*/
CUDF_HOST_DEVICE inline T& operator-=(difference_type offset)
{
T& derived = static_cast<T&>(*this);
derived.p_ -= offset * width_;
return derived;
}
/**
* @brief Decrement by offset operator.
*/
CUDF_HOST_DEVICE inline T operator-(difference_type offset) const
{
auto tmp = T{static_cast<T const&>(*this)};
tmp.p_ -= (offset * width_);
return tmp;
}
/**
* @brief Subtraction operator with the offset on the left-hand side.
*/
CUDF_HOST_DEVICE inline friend T operator-(difference_type offset, T const& rhs)
{
T tmp{rhs};
tmp.p_ -= (offset * rhs.width_);
return tmp;
}
/**
* @brief Compute offset from iterator difference operator.
*/
CUDF_HOST_DEVICE inline difference_type operator-(T const& rhs) const
{
return (static_cast<T const&>(*this).p_ - rhs.p_) / width_;
}
/**
* @brief Equals to operator.
*/
CUDF_HOST_DEVICE inline bool operator==(T const& rhs) const
{
return rhs.p_ == static_cast<T const&>(*this).p_;
}
/**
* @brief Not equals to operator.
*/
CUDF_HOST_DEVICE inline bool operator!=(T const& rhs) const
{
return rhs.p_ != static_cast<T const&>(*this).p_;
}
/**
* @brief Less than operator.
*/
CUDF_HOST_DEVICE inline bool operator<(T const& rhs) const
{
return static_cast<T const&>(*this).p_ < rhs.p_;
}
/**
* @brief Greater than operator.
*/
CUDF_HOST_DEVICE inline bool operator>(T const& rhs) const
{
return static_cast<T const&>(*this).p_ > rhs.p_;
}
/**
* @brief Less than or equals to operator.
*/
CUDF_HOST_DEVICE inline bool operator<=(T const& rhs) const
{
return static_cast<T const&>(*this).p_ <= rhs.p_;
}
/**
* @brief Greater than or equals to operator.
*/
CUDF_HOST_DEVICE inline bool operator>=(T const& rhs) const
{
return static_cast<T const&>(*this).p_ >= rhs.p_;
}
protected:
/**
* @brief Constructor assigns width and type member variables for base class.
*/
base_indexalator(int32_t width, data_type dtype) : width_(width), dtype_(dtype) {}
int width_; /// integer type width = 1,2,4, or 8
data_type dtype_; /// for type-dispatcher calls
};
/**
* @brief The index normalizing input iterator.
*
* This is an iterator that can be used for index types (integers) without
* requiring a type-specific instance. It can be used for any iterator
* interface for reading an array of integer values of type
* int8, int16, int32, int64, uint8, uint16, uint32, or uint64.
* Reading specific elements always return a `size_type` integer.
*
* Use the indexalator_factory to create an appropriate input iterator
* from a column_view.
*
* Example input iterator usage.
* @code
* auto begin = indexalator_factory::make_input_iterator(gather_map);
* auto end = begin + gather_map.size();
* auto result = detail::gather( source, begin, end, IGNORE, stream, mr );
* @endcode
*
* @code
* auto begin = indexalator_factory::make_input_iterator(indices);
* auto end = begin + indices.size();
* auto result = thrust::find(thrust::device, begin, end, size_type{12} );
* @endcode
*/
struct input_indexalator : base_indexalator<input_indexalator> {
friend struct indexalator_factory;
friend struct base_indexalator<input_indexalator>; // for CRTP
using reference = size_type const; // this keeps STL and thrust happy
input_indexalator() = default;
input_indexalator(input_indexalator const&) = default;
input_indexalator(input_indexalator&&) = default;
input_indexalator& operator=(input_indexalator const&) = default;
input_indexalator& operator=(input_indexalator&&) = default;
/**
* @brief Indirection operator returns the value at the current iterator position.
*/
__device__ inline size_type operator*() const { return operator[](0); }
/**
* @brief Dispatch functor for resolving a size_type value from any index type.
*/
struct index_as_size_type {
template <typename T, std::enable_if_t<is_index_type<T>()>* = nullptr>
__device__ size_type operator()(void const* tp)
{
return static_cast<size_type>(*static_cast<T const*>(tp));
}
template <typename T, std::enable_if_t<not is_index_type<T>()>* = nullptr>
__device__ size_type operator()(void const* tp)
{
CUDF_UNREACHABLE("only index types are supported");
}
};
/**
* @brief Array subscript operator returns a value at the input
* `idx` position as a `size_type` value.
*/
__device__ inline size_type operator[](size_type idx) const
{
void const* tp = p_ + (idx * width_);
return type_dispatcher(dtype_, index_as_size_type{}, tp);
}
protected:
/**
* @brief Create an input index normalizing iterator.
*
* Use the indexalator_factory to create an iterator instance.
*
* @param data Pointer to an integer array in device memory.
* @param width The width of the integer type (1, 2, 4, or 8)
* @param data_type Index integer type of width `width`
*/
input_indexalator(void const* data, int width, data_type dtype)
: base_indexalator<input_indexalator>(width, dtype), p_{static_cast<char const*>(data)}
{
}
char const* p_; /// pointer to the integer data in device memory
};
/**
* @brief The index normalizing output iterator.
*
* This is an iterator that can be used for index types (integers) without
* requiring a type-specific instance. It can be used for any iterator
* interface for writing an array of integer values of type
* int8, int16, int32, int64, uint8, uint16, uint32, or uint64.
* Setting specific elements always accept `size_type` integer values.
*
* Use the indexalator_factory to create an appropriate output iterator
* from a mutable_column_view.
*
* Example output iterator usage.
* @code
* auto result_itr = indexalator_factory::make_output_iterator(indices->mutable_view());
* thrust::lower_bound(rmm::exec_policy(stream),
* input->begin<Element>(),
* input->end<Element>(),
* values->begin<Element>(),
* values->end<Element>(),
* result_itr,
* thrust::less<Element>());
* @endcode
*/
struct output_indexalator : base_indexalator<output_indexalator> {
friend struct indexalator_factory;
friend struct base_indexalator<output_indexalator>; // for CRTP
using reference = output_indexalator const&; // required for output iterators
output_indexalator() = default;
output_indexalator(output_indexalator const&) = default;
output_indexalator(output_indexalator&&) = default;
output_indexalator& operator=(output_indexalator const&) = default;
output_indexalator& operator=(output_indexalator&&) = default;
/**
* @brief Indirection operator returns this iterator instance in order
* to capture the `operator=(size_type)` calls.
*/
__device__ inline output_indexalator const& operator*() const { return *this; }
/**
* @brief Array subscript operator returns an iterator instance at the specified `idx` position.
*
* This allows capturing the subsequent `operator=(size_type)` call in this class.
*/
__device__ inline output_indexalator const operator[](size_type idx) const
{
output_indexalator tmp{*this};
tmp.p_ += (idx * width_);
return tmp;
}
/**
* @brief Dispatch functor for setting the index value from a size_type value.
*/
struct size_type_to_index {
template <typename T, std::enable_if_t<is_index_type<T>()>* = nullptr>
__device__ void operator()(void* tp, size_type const value)
{
(*static_cast<T*>(tp)) = static_cast<T>(value);
}
template <typename T, std::enable_if_t<not is_index_type<T>()>* = nullptr>
__device__ void operator()(void* tp, size_type const value)
{
CUDF_UNREACHABLE("only index types are supported");
}
};
/**
* @brief Assign a size_type value to the current iterator position.
*/
__device__ inline output_indexalator const& operator=(size_type const value) const
{
void* tp = p_;
type_dispatcher(dtype_, size_type_to_index{}, tp, value);
return *this;
}
protected:
/**
* @brief Create an output index normalizing iterator.
*
* Use the indexalator_factory to create an iterator instance.
*
* @param data Pointer to an integer array in device memory.
* @param width The width of the integer type (1, 2, 4, or 8)
* @param data_type Index integer type of width `width`
*/
output_indexalator(void* data, int width, data_type dtype)
: base_indexalator<output_indexalator>(width, dtype), p_{static_cast<char*>(data)}
{
}
char* p_; /// pointer to the integer data in device memory
};
/**
* @brief Use this class to create an indexalator instance.
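*
* A minimal usage sketch (hypothetical names; assumes a `column_view indices`,
* a `mutable_column_view output` holding an index type, and a CUDA stream
* `stream` are in scope):
* @code
* auto in_itr  = cudf::detail::indexalator_factory::make_input_iterator(indices);
* auto out_itr = cudf::detail::indexalator_factory::make_output_iterator(output);
* thrust::copy(rmm::exec_policy(stream), in_itr, in_itr + indices.size(), out_itr);
* @endcode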
*/
struct indexalator_factory {
/**
* @brief A type_dispatcher functor to create an input iterator from an indices column.
*/
struct input_indexalator_fn {
template <typename IndexType, std::enable_if_t<is_index_type<IndexType>()>* = nullptr>
input_indexalator operator()(column_view const& indices)
{
return input_indexalator(indices.data<IndexType>(), sizeof(IndexType), indices.type());
}
template <typename IndexType,
typename... Args,
std::enable_if_t<not is_index_type<IndexType>()>* = nullptr>
input_indexalator operator()(Args&&... args)
{
CUDF_FAIL("indices must be an index type");
}
};
/**
* @brief Use this class to create an indexalator to a scalar index.
*/
struct input_indexalator_scalar_fn {
template <typename IndexType, std::enable_if_t<is_index_type<IndexType>()>* = nullptr>
input_indexalator operator()(scalar const& index)
{
// note: a by-value static_cast<scalar_type_t<IndexType>>(index) would create a copy of the scalar, so cast the pointer instead
auto const scalar_impl = static_cast<scalar_type_t<IndexType> const*>(&index);
return input_indexalator(scalar_impl->data(), sizeof(IndexType), index.type());
}
template <typename IndexType,
typename... Args,
std::enable_if_t<not is_index_type<IndexType>()>* = nullptr>
input_indexalator operator()(Args&&... args)
{
CUDF_FAIL("scalar must be an index type");
}
};
/**
* @brief A type_dispatcher functor to create an output iterator from an indices column.
*/
struct output_indexalator_fn {
template <typename IndexType, std::enable_if_t<is_index_type<IndexType>()>* = nullptr>
output_indexalator operator()(mutable_column_view const& indices)
{
return output_indexalator(indices.data<IndexType>(), sizeof(IndexType), indices.type());
}
template <typename IndexType,
typename... Args,
std::enable_if_t<not is_index_type<IndexType>()>* = nullptr>
output_indexalator operator()(Args&&... args)
{
CUDF_FAIL("indices must be an index type");
}
};
/**
* @brief Create an input indexalator instance from an indices column.
*/
static input_indexalator make_input_iterator(column_view const& indices)
{
return type_dispatcher(indices.type(), input_indexalator_fn{}, indices);
}
/**
* @brief Create an input indexalator instance from an index scalar.
*/
static input_indexalator make_input_iterator(cudf::scalar const& index)
{
return type_dispatcher(index.type(), input_indexalator_scalar_fn{}, index);
}
/**
* @brief Create an output indexalator instance from an indices column.
*/
static output_indexalator make_output_iterator(mutable_column_view const& indices)
{
return type_dispatcher(indices.type(), output_indexalator_fn{}, indices);
}
/**
* @brief An index accessor that returns a validity flag along with the index value.
*
* This is suitable as a `pair_iterator` for calling functions like `copy_if_else`.
*/
struct nullable_index_accessor {
input_indexalator iter;
bitmask_type const* null_mask{};
size_type const offset{};
bool const has_nulls{};
/**
* @brief Create an accessor from a column_view.
*/
nullable_index_accessor(column_view const& col, bool has_nulls = false)
: null_mask{col.null_mask()}, offset{col.offset()}, has_nulls{has_nulls}
{
if (has_nulls) { CUDF_EXPECTS(col.nullable(), "Unexpected non-nullable column."); }
iter = make_input_iterator(col);
}
__device__ thrust::pair<size_type, bool> operator()(size_type i) const
{
return {iter[i], (has_nulls ? bit_is_set(null_mask, i + offset) : true)};
}
};
/**
* @brief An index accessor that returns a validity flag along with the index value.
*
* This is suitable as a `pair_iterator`.
*/
struct scalar_nullable_index_accessor {
input_indexalator iter;
bool const is_null;
/**
* @brief Create an accessor from a scalar.
*/
scalar_nullable_index_accessor(scalar const& input) : is_null{!input.is_valid()}
{
iter = indexalator_factory::make_input_iterator(input);
}
__device__ thrust::pair<size_type, bool> operator()(size_type) const
{
// the second element is the validity flag, consistent with nullable_index_accessor above
return {*iter, !is_null};
}
};
/**
* @brief Create an index iterator with a nullable index accessor.
*/
static auto make_input_pair_iterator(column_view const& col)
{
return make_counting_transform_iterator(0, nullable_index_accessor{col, col.has_nulls()});
}
/**
* @brief Create an index iterator with a nullable index accessor for a scalar.
*/
static auto make_input_pair_iterator(scalar const& input)
{
return thrust::make_transform_iterator(thrust::make_constant_iterator<size_type>(0),
scalar_nullable_index_accessor{input});
}
/**
* @brief An index accessor that returns an index value if corresponding validity flag is true.
*
* This is suitable as an `optional_iterator`.
*/
struct optional_index_accessor {
input_indexalator iter;
bitmask_type const* null_mask{};
size_type const offset{};
bool const has_nulls{};
/**
* @brief Create an accessor from a column_view.
*/
optional_index_accessor(column_view const& col, bool has_nulls = false)
: null_mask{col.null_mask()}, offset{col.offset()}, has_nulls{has_nulls}
{
if (has_nulls) { CUDF_EXPECTS(col.nullable(), "Unexpected non-nullable column."); }
iter = make_input_iterator(col);
}
__device__ thrust::optional<size_type> operator()(size_type i) const
{
return has_nulls && !bit_is_set(null_mask, i + offset) ? thrust::nullopt
: thrust::make_optional(iter[i]);
}
};
/**
* @brief An index accessor that returns an index value if corresponding validity flag is true.
*
* This is suitable as an `optional_iterator`.
*/
struct scalar_optional_index_accessor {
input_indexalator iter;
bool const is_null;
/**
* @brief Create an accessor from a scalar.
*/
scalar_optional_index_accessor(scalar const& input) : is_null{!input.is_valid()}
{
iter = indexalator_factory::make_input_iterator(input);
}
__device__ thrust::optional<size_type> operator()(size_type) const
{
return is_null ? thrust::nullopt : thrust::make_optional(*iter);
}
};
/**
* @brief Create an index iterator with a nullable index accessor.
*/
static auto make_input_optional_iterator(column_view const& col)
{
return make_counting_transform_iterator(0, optional_index_accessor{col, col.has_nulls()});
}
/**
* @brief Create an index iterator with a nullable index accessor for a scalar.
*/
static auto make_input_optional_iterator(scalar const& input)
{
return thrust::make_transform_iterator(thrust::make_constant_iterator<size_type>(0),
scalar_optional_index_accessor{input});
}
};
} // namespace detail
} // namespace cudf
#include <stdio.h>
// #include <vector>
#include <cmath>
#include <cassert>
#include <cstdlib> // getenv(), atoi() in GPUNB_devinit
#include <cstring> // strtok() in GPUNB_devinit
#ifdef CUDA_5
#include <helper_cuda.h>
#define CUDA_SAFE_CALL checkCudaErrors
#else
#include <cutil.h>
#endif
#include "cuda_pointer.h"
#define NTHREAD 64 // 64, 96, 128 or 192
#define NJBLOCK 28 // 8800GTS/512 has 16
#define NIBLOCK 16 // 16 or 32
#define NIMAX (NTHREAD * NIBLOCK) // 1024
#define NXREDUCE 32 // must be >NJBLOCK
#define NYREDUCE 8
#define NB_PER_BLOCK 256 // NNB per block
#define NB_BUF_SIZE (1<<20)
#define NAN_CHECK(val) assert((val) == (val));
typedef unsigned short uint16;
// template <class T>
// struct myvector{
// int num;
// T *val;
// myvector(){
// num = 0;
// val = NULL;
// }
// ~myvector(){
// delete [] val;
// }
// void clear(){
// num = 0;
// }
// void reserve(size_t count){
// val = new T[count];
// }
// void free(){
// delete [] val;
// }
// void push_back(const T &t){
// val[num++] = t;
// }
// size_t size(){
// return num;
// }
// T &operator[](int i){
// return val[i];
// }
// };
#define PROFILE
#ifdef PROFILE
#include <sys/time.h>
static double get_wtime(){
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec + 1.e-6 * tv.tv_usec;
}
#else
static double get_wtime(){
return 0.0;
}
#endif
static double time_send, time_grav, time_out, time_nb;
static long long numInter;
struct Jparticle{
float3 pos;
float mass;
float3 vel;
float pad;
Jparticle() {}
Jparticle(double mj, double xj[3], double vj[3]){
pos.x = xj[0];
pos.y = xj[1];
pos.z = xj[2];
mass = mj;
vel.x = vj[0];
vel.y = vj[1];
vel.z = vj[2];
NAN_CHECK(xj[0]);
NAN_CHECK(xj[1]);
NAN_CHECK(xj[2]);
NAN_CHECK(mj);
NAN_CHECK(vj[0]);
NAN_CHECK(vj[1]);
NAN_CHECK(vj[2]);
}
};
struct Iparticle{
float3 pos;
float h2;
float3 vel;
float dtr;
Iparticle() {}
Iparticle(double h2i, double dtri,double xi[3], double vi[3]){
pos.x = xi[0];
pos.y = xi[1];
pos.z = xi[2];
h2 = h2i;
vel.x = vi[0];
vel.y = vi[1];
vel.z = vi[2];
dtr = dtri;
NAN_CHECK(xi[0]);
NAN_CHECK(xi[1]);
NAN_CHECK(xi[2]);
NAN_CHECK(h2i);
NAN_CHECK(vi[0]);
NAN_CHECK(vi[1]);
NAN_CHECK(vi[2]);
}
};
struct Force{
float3 acc;
float pot;
float3 jrk;
int nnb; // 8 words
// unsigned short neib[NB_PER_BLOCK]; // 24 words
// __device__ Force(){
// acc.x = acc.y = acc.z = 0.f;
// jrk.x = jrk.y = jrk.z = 0.f;
// pot = 0.f;
// nnb = 0;
// }
__device__ void clear(){
acc.x = acc.y = acc.z = 0.f;
jrk.x = jrk.y = jrk.z = 0.f;
pot = 0.f;
nnb = 0;
}
__device__ void operator+=(const Force &rhs){
acc.x += rhs.acc.x;
acc.y += rhs.acc.y;
acc.z += rhs.acc.z;
#ifdef POTENTIAL
pot += rhs.pot;
#endif
jrk.x += rhs.jrk.x;
jrk.y += rhs.jrk.y;
jrk.z += rhs.jrk.z;
if(nnb>=0 && rhs.nnb>=0){
nnb += rhs.nnb;
}else{
nnb = -1;
}
}
#if __CUDA_ARCH__ >= 300
__device__ void reduce_with(const int mask){
acc.x += __shfl_xor(acc.x, mask);
acc.y += __shfl_xor(acc.y, mask);
acc.z += __shfl_xor(acc.z, mask);
#ifdef POTENTIAL
pot += __shfl_xor(pot , mask);
#endif
jrk.x += __shfl_xor(jrk.x, mask);
jrk.y += __shfl_xor(jrk.y, mask);
jrk.z += __shfl_xor(jrk.z, mask);
int ntmp = __shfl_xor(nnb, mask);
if(nnb>=0 && ntmp>=0){
nnb += ntmp;
}else{
nnb = -1;
}
}
#endif
};
// __device__ float rsqrtfNR(float x){
// float y = rsqrtf(x);
// return (-0.5f * y) * (x*y*y - 3.0f);
// }
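// h4_kernel: adds the contribution of j-particle jp to i-particle ip, i.e. the
// Newtonian acceleration and its time derivative (jerk). Pairs closer than
// sqrt(ip.h2), either now or at the dtr-predicted positions, are recorded in
// the per-block neighbour list and excluded from the force by zeroing rinv1.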
__device__ void h4_kernel(
const int j,
const Iparticle &ip,
const Jparticle &jp,
Force &fo,
uint16 nblist[]){
float dx = jp.pos.x - ip.pos.x;
float dy = jp.pos.y - ip.pos.y;
float dz = jp.pos.z - ip.pos.z;
float dvx = jp.vel.x - ip.vel.x;
float dvy = jp.vel.y - ip.vel.y;
float dvz = jp.vel.z - ip.vel.z;
float r2 = dx*dx + dy*dy + dz*dz;
//Add Velocity criterion============================//
float dxp = dx + ip.dtr * dvx;
float dyp = dy + ip.dtr * dvy;
float dzp = dz + ip.dtr * dvz;
float r2p = dxp*dxp + dyp*dyp + dzp*dzp;
//==================================================//
float rv = dx*dvx + dy*dvy + dz*dvz;
float rinv1 = rsqrtf(r2);
if(min(r2,r2p) < ip.h2){
// fo.neib[fo.nnb++ % NB_PER_BLOCK] = j;
nblist[fo.nnb & (NB_PER_BLOCK-1)] = (uint16)j;
fo.nnb++;
rinv1 = 0.f;
}
float rinv2 = rinv1 * rinv1;
float mrinv1 = jp.mass * rinv1;
float mrinv3 = mrinv1 * rinv2;
rv *= -3.f * rinv2;
#ifdef POTENTIAL
fo.pot += mrinv1;
#endif
fo.acc.x += mrinv3 * dx;
fo.acc.y += mrinv3 * dy;
fo.acc.z += mrinv3 * dz;
// fo.acc.z += 1.0;
fo.jrk.x += mrinv3 * (dvx + rv * dx);
fo.jrk.y += mrinv3 * (dvy + rv * dy);
fo.jrk.z += mrinv3 * (dvz + rv * dz);
}
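// h4_gravity: direct-summation force kernel. blockIdx.x tiles the i-particles
// (NTHREAD per block) and blockIdx.y selects one of NJBLOCK slices of the
// j-particles, loaded tile by tile into shared memory. Each (i, jblock) pair
// produces a partial Force and a partial neighbour list; these are combined
// afterwards by force_reduce_kernel and gather_nb_kernel.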
__global__ void h4_gravity(
const int nbody,
const Iparticle ipbuf[],
const Jparticle jpbuf[],
Force fobuf[][NJBLOCK],
uint16 nbbuf[][NJBLOCK][NB_PER_BLOCK]){
int ibid = blockIdx.x;
int jbid = blockIdx.y;
int tid = threadIdx.x;
int iaddr = tid + NTHREAD * ibid;
int jstart = (nbody * (jbid )) / NJBLOCK;
int jend = (nbody * (jbid+1)) / NJBLOCK;
Iparticle ip = ipbuf[iaddr];
Force fo;
fo.clear();
uint16 *nblist = nbbuf[iaddr][jbid];
#if __CUDA_ARCH__ >= 300 // just some trial
for(int j=jstart; j<jend; j+=32){
__shared__ Jparticle jpshare[32];
__syncthreads();
float4 *src = (float4 *)&jpbuf[j];
float4 *dst = (float4 *)jpshare;
dst[tid] = src[tid];
__syncthreads();
if(jend-j < 32){
#pragma unroll 4
for(int jj=0; jj<jend-j; jj++){
const Jparticle jp = jpshare[jj];
// const Jparticle jp( (float4 *)jpshare + 2*jj);
h4_kernel(j-jstart+jj, ip, jp, fo, nblist);
}
}else{
#pragma unroll 8
for(int jj=0; jj<32; jj++){
const Jparticle jp = jpshare[jj];
// const Jparticle jp( (float4 *)jpshare + 2*jj);
h4_kernel(j-jstart+jj, ip, jp, fo, nblist);
}
}
}
#else
for(int j=jstart; j<jend; j+=NTHREAD){
__shared__ Jparticle jpshare[NTHREAD];
__syncthreads();
float4 *src = (float4 *)&jpbuf[j];
float4 *dst = (float4 *)jpshare;
dst[ tid] = src[ tid];
dst[NTHREAD+tid] = src[NTHREAD+tid];
__syncthreads();
if(jend-j < NTHREAD){
#pragma unroll 4
for(int jj=0; jj<jend-j; jj++){
Jparticle jp = jpshare[jj];
h4_kernel(j-jstart+jj, ip, jp, fo, nblist);
}
}else{
#pragma unroll 8
for(int jj=0; jj<NTHREAD; jj++){
Jparticle jp = jpshare[jj];
h4_kernel(j-jstart+jj, ip, jp, fo, nblist);
}
}
}
#endif
if(fo.nnb > NB_PER_BLOCK) fo.nnb = -1;
fobuf[iaddr][jbid] = fo;
}
#if __CUDA_ARCH__ >= 300
__device__ void warp_reduce_int(int inp, int *out){
inp += __shfl_xor(inp, 1);
inp += __shfl_xor(inp, 2);
inp += __shfl_xor(inp, 4);
inp += __shfl_xor(inp, 8);
# if NXREDUCE==32
inp += __shfl_xor(inp, 16);
# endif
*out = inp;
}
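// Shuffle-based butterfly reduction of eight float components held in two
// float4 registers: after the xor exchanges, lanes 0..7 hold the warp-wide
// sums of the eight components and write them to out[0..7] (the eighth slot
// is then overwritten by warp_reduce_int with the reduced neighbour count).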
__device__ void warp_reduce_float8(float4 inp1, float4 inp2, float *out){
const int tid = threadIdx.x;
float4 tmp4L = (4&tid) ? inp2 : inp1;
float4 tmp4R = (4&tid) ? inp1 : inp2;
tmp4L.x += __shfl_xor(tmp4R.x, 4);
tmp4L.y += __shfl_xor(tmp4R.y, 4);
tmp4L.z += __shfl_xor(tmp4R.z, 4);
tmp4L.w += __shfl_xor(tmp4R.w, 4);
float4 tmp4;
tmp4.x = (2&tid) ? tmp4L.z : tmp4L.x;
tmp4.y = (2&tid) ? tmp4L.w : tmp4L.y;
tmp4.z = (2&tid) ? tmp4L.x : tmp4L.z;
tmp4.w = (2&tid) ? tmp4L.y : tmp4L.w;
tmp4.x += __shfl_xor(tmp4.z, 2);
tmp4.y += __shfl_xor(tmp4.w, 2);
float2 tmp2;
tmp2.x = (1&tid) ? tmp4.y : tmp4.x;
tmp2.y = (1&tid) ? tmp4.x : tmp4.y;
tmp2.x += __shfl_xor(tmp2.y, 1);
tmp2.x += __shfl_xor(tmp2.x, 8);
# if NXREDUCE==32
tmp2.x += __shfl_xor(tmp2.x, 16);
# endif
if(tid < 8){
out[tid] = tmp2.x;
}
}
#endif
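// force_reduce_kernel: sums the NJBLOCK partial forces of each i-particle into
// ftot[i]. One row of NXREDUCE lanes handles one particle and NYREDUCE
// particles share a block; sm_30+ uses warp shuffles, older devices fall back
// to a shared-memory tree reduction.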
__global__ void force_reduce_kernel(
const int ni,
const Force fpart[][NJBLOCK],
Force ftot []){
const int xid = threadIdx.x;
const int yid = threadIdx.y;
const int bid = blockIdx.x;
const int iaddr = yid + blockDim.y * bid;
#if __CUDA_ARCH__ >= 300
Force f;
if(xid < NJBLOCK){
f = fpart[iaddr][xid];
}else{
f.clear();
}
if(iaddr < ni){
const float4 tmp1 = make_float4(f.acc.x, f.acc.y, f.acc.z, f.pot);
const float4 tmp2 = make_float4(f.jrk.x, f.jrk.y, f.jrk.z, 0.0f);
const int itmp = f.nnb;
float *dst = (float *)(ftot + iaddr);
int *idst = (int *)(dst + 7);
warp_reduce_float8(tmp1, tmp2, dst);
warp_reduce_int(itmp, idst);
}
#else
__shared__ Force fshare[NYREDUCE][NXREDUCE];
if(xid < NJBLOCK){
fshare[yid][xid] = fpart[iaddr][xid];
}else{
fshare[yid][xid].clear();
}
Force *fs = fshare[yid];
#if NXREDUCE==32
if(xid < 16) fs[xid] += fs[xid + 16];
#endif
if(xid < 8) fs[xid] += fs[xid + 8];
if(xid < 4) fs[xid] += fs[xid + 4];
if(xid < 2) fs[xid] += fs[xid + 2];
if(xid < 1) fs[xid] += fs[xid + 1];
if(iaddr < ni){
ftot[iaddr] = fs[0];
}
#endif
}
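// gather_nb_kernel: packs the per-(i, jblock) neighbour lists into one
// contiguous list per i-particle. An intra-warp exclusive prefix sum over the
// NJBLOCK counts gives each block's offset, then the uint16 block-local
// indices are rebased to global j indices and written to nblist starting at
// nboff[i]. Particles whose total count overflowed (ftot[i].nnb < 0) are
// skipped.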
__global__ void gather_nb_kernel(
const int ni,
const int nj,
const Force fpart[][NJBLOCK],
const Force ftot [],
const int nboff[],
const uint16 nbpart[][NJBLOCK][NB_PER_BLOCK],
int nblist[])
{
const int xid = threadIdx.x;
const int yid = threadIdx.y;
const int bid = blockIdx.x;
const int iaddr = yid + blockDim.y * bid;
if(iaddr >= ni) return;
if(ftot[iaddr].nnb < 0) return;
const int mynnb = (xid < NJBLOCK) ? fpart[iaddr][xid].nnb
: 0;
// now perform the prefix sum
#if __CUDA_ARCH__ >= 300
int ix = mynnb;
#pragma unroll
for(int ioff=1; ioff<NXREDUCE; ioff*=2){
int iy = __shfl_up(ix, ioff);
if(xid>=ioff) ix += iy;
}
int iz = __shfl_up(ix, 1);
const int off = (xid == 0) ? 0 : iz;
#else
__shared__ int ishare[NYREDUCE][NXREDUCE];
ishare[yid][xid] = mynnb;
volatile int *ish = ishare[yid];
if(xid>=1) ish[xid] += ish[xid-1];
if(xid>=2) ish[xid] += ish[xid-2];
if(xid>=4) ish[xid] += ish[xid-4];
if(xid>=8) ish[xid] += ish[xid-8];
#if NXREDUCE==32
if(xid>=16) ish[xid] += ish[xid-16];
#endif
const int off = (xid == 0) ? 0
: ish[xid-1];
#endif
int *nbdst = nblist + nboff[iaddr] + off;
const int jstart = (nj * xid) / NJBLOCK;
if(xid < NJBLOCK){
for(int k=0; k<mynnb; k++){
const int nbid = jstart + int(nbpart[iaddr][xid][k]);
// const int nbid = iaddr * 1000 + k;
nbdst[k] = nbid;
}
}
}
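// Persistent buffers reused across calls (cudaPointer, from cuda_pointer.h,
// appears to pair a host mirror with a device allocation and provides
// htod()/dtoh() transfers); they are sized in GPUNB_open and released in
// GPUNB_close.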
static cudaPointer <Jparticle> jpbuf;
static cudaPointer <Iparticle> ipbuf;
static cudaPointer <Force[NJBLOCK]> fopart;
static cudaPointer <Force> fobuf;
static cudaPointer <uint16[NJBLOCK][NB_PER_BLOCK]>nbpart;
static cudaPointer <int> nblist;
static cudaPointer <int> nboff;
//static myvector<int> nblist;
static int nbody, nbodymax;
static int devid, numGPU;
static bool is_open = false;
static bool devinit = false;
// static int *nblist;
void GPUNB_devinit(int irank){
if(devinit) return;
cudaGetDeviceCount(&numGPU);
assert(numGPU > 0);
char *gpu_list = getenv("GPU_LIST");
if(gpu_list)
{
numGPU = 0;
char *p = strtok(gpu_list, " ");
if (p) {
devid = atoi(p);
numGPU++;
}
assert(numGPU > 0);
}else{
devid=irank%numGPU;
}
cudaSetDevice(devid);
#ifdef PROFILE
fprintf(stderr, "***********************\n");
fprintf(stderr, "Initializing NBODY6/GPU library\n");
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, devid);
fprintf(stderr, "#GPU %d; device: %d %s\n", numGPU, devid, prop.name);
fprintf(stderr, "***********************\n");
#endif
devinit = true;
}
void GPUNB_open(int nbmax,int irank){
time_send = time_grav = time_nb = time_out = 0.0;
numInter = 0;
//select GPU========================================//
GPUNB_devinit(irank);
if(is_open){
fprintf(stderr, "gpunb: it is already open\n");
return;
}
is_open = true;
//==================================================//
// CUT_DEVICE_INIT();
// size_t jpsize = nbmax * sizeof(Jparticle);
// size_t ipsize = NIMAX * sizeof(Iparticle);
// size_t fosize = NIBLOCK * NJBLOCK * NTHREAD * sizeof(Force);
// cudaMallocHost((void **)&jp_host, jpsize);
// jpsize += NTHREAD * sizeof(Jparticle);
// cudaMalloc ((void **)&jp_dev , jpsize);
// cudaMallocHost((void **)&ip_host, ipsize);
// cudaMalloc ((void **)&ip_dev , ipsize);
// cudaMallocHost((void **)&fo_host, fosize);
// cudaMalloc ((void **)&fo_dev , fosize);
jpbuf.allocate(nbmax + NTHREAD);
ipbuf.allocate(NIMAX);
fopart.allocate(NIMAX);
fobuf.allocate(NIMAX);
nbpart.allocate(NIMAX);
nblist.allocate(NB_BUF_SIZE);
nboff.allocate(NIMAX+1);
nbodymax = nbmax;
// nblist.reserve(nbmax);
#ifdef PROFILE
fprintf(stderr, "RANK: %d ******************\n",irank);
fprintf(stderr, "Opened NBODY6/GPU library\n");
fprintf(stderr, "nbmax = %d\n", nbmax);
fprintf(stderr, "***********************\n");
#endif
}
void GPUNB_close(){
if(!is_open){
fprintf(stderr, "gpunb: it is already close\n");
return;
}
is_open = false;
// cudaFreeHost(jp_host);
// cudaFree (jp_dev);
// cudaFreeHost(ip_host);
// cudaFree (ip_dev);
// cudaFreeHost(fo_host);
// cudaFree (fo_dev);
jpbuf.free();
ipbuf.free();
fopart.free();
fobuf.free();
nbpart.free();
nblist.free();
nboff.free();
nbodymax = 0;
#ifdef PROFILE
fprintf(stderr, "Closed NBODY6/GPU library\n");
fprintf(stderr, "%d*********************\n",devid);
fprintf(stderr, "time send : %f sec\n", time_send);
fprintf(stderr, "time grav : %f sec\n", time_grav);
fprintf(stderr, "time nb : %f sec\n", time_nb);
fprintf(stderr, "time out : %f sec\n", time_out);
fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav);
fprintf(stderr, "***********************\n");
#endif
}
void GPUNB_send(
int nj,
double mj[],
double xj[][3],
double vj[][3]){
time_send -= get_wtime();
nbody = nj;
assert(nbody <= nbodymax);
// time_send -= get_wtime();
for(int j=0; j<nj; j++){
// jp_host[j] = Jparticle(mj[j], xj[j], vj[j]);
jpbuf[j] = Jparticle(mj[j], xj[j], vj[j]);
}
// size_t jpsize = nj * sizeof(Jparticle);
// cudaMemcpy(jp_dev, jp_host, jpsize, cudaMemcpyHostToDevice);
jpbuf.htod(nj);
time_send += get_wtime();
}
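// GPUNB_regf: computes regular forces for ni active particles against the
// previously sent j-particle set. It copies the i-particles to the device,
// launches h4_gravity and force_reduce_kernel, builds the neighbour-list
// prefix sum on the host, gathers the packed neighbour lists, and finally
// copies acc/jerk (and optionally the potential) plus the neighbour lists
// back into the caller's arrays.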
void GPUNB_regf(
int ni,
double h2[],
double dtr[],
double xi[][3],
double vi[][3],
double acc[][3],
double jrk[][3],
double pot[],
int lmax,
int nnbmax,
int *listbase){
assert(is_open);
time_grav -= get_wtime();
numInter += ni * nbody;
assert(0 < ni && ni <= NIMAX);
/* printf(" ni lm %d %d %d \t %e %e %e\n",ni, lmax, nnbmax, h2[0], xi[0][0], vi[0][0]);*/
for(int i=0; i<ni; i++){
// ip_host[i] = Iparticle(h2[i], xi[i], vi[i]);
ipbuf[i] = Iparticle(h2[i],dtr[i], xi[i], vi[i]);
}
// set i-particles
// size_t ipsize = ni * sizeof(Iparticle);
// cudaMemcpy(ip_dev, ip_host, ipsize, cudaMemcpyHostToDevice);
ipbuf.htod(ni);
// gravity kernel
int niblock = 1 + (ni-1) / NTHREAD;
dim3 grid(niblock, NJBLOCK, 1);
dim3 threads(NTHREAD, 1, 1);
// h4_gravity <<< grid, threads >>>
// (nbody, ip_dev, jp_dev, fo_dev);
h4_gravity <<< grid, threads >>>
(nbody, ipbuf, jpbuf, fopart, nbpart);
const int ni8 = 1 + (ni-1) / NYREDUCE;
dim3 rgrid (ni8, 1, 1);
dim3 rthreads(NXREDUCE, NYREDUCE, 1);
force_reduce_kernel <<< rgrid, rthreads >>>
(ni, fopart, fobuf);
// receive force
// size_t fosize = ni * NJBLOCK * sizeof(Force);
// cudaMemcpy(fo_host, fo_dev, fosize, cudaMemcpyDeviceToHost);
fobuf.dtoh(ni);
double wt = get_wtime();
time_grav += wt;
time_nb -= wt;
// now make prefix sum
int nbsum = 0;
for(int i=0; i<ni; i++){
nboff[i] = nbsum;
const int nnb = fobuf[i].nnb;
if(nnb >= 0) nbsum += nnb;
}
assert(nbsum <= NB_BUF_SIZE);
nboff.htod(ni);
gather_nb_kernel <<< rgrid, rthreads>>>
(ni, nbody, fopart, fobuf, nboff, nbpart, nblist);
nblist.dtoh(nbsum);
wt = get_wtime();
time_nb += wt;
time_out -= wt;
// out data
for(int i=0; i<ni; i++){
Force &fo = fobuf[i];
acc[i][0] = fo.acc.x;
acc[i][1] = fo.acc.y;
acc[i][2] = fo.acc.z;
jrk[i][0] = fo.jrk.x;
jrk[i][1] = fo.jrk.y;
jrk[i][2] = fo.jrk.z;
// fprintf(stderr, "%f %f %f %f %f %f\n", acc[i][0], acc[i][1], acc[i][2], jrk[i][0], jrk[i][1], jrk[i][2]);
// exit(0);
#ifdef POTENTIAL
pot[i] = fo.pot;
#endif
int *nnbp = listbase + lmax * i;
int *nblistp = nnbp + 1;
if(fo.nnb >=0 && fo.nnb <= nnbmax){
*nnbp = fo.nnb;
// fprintf(stderr, "nnb %d\n", fo.nnb);
const int off = nboff[i];
for(int k=0; k<fo.nnb; k++) nblistp[k]=nblist[off + k];
}
else *nnbp = fo.nnb ? -abs(fo.nnb) : -1;
}
time_out += get_wtime();
}
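// Fortran-callable entry points used by NBODY6: trailing-underscore names,
// with all scalar arguments passed by reference.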
extern "C" {
void gpunb_devinit_ (int *irank){
GPUNB_devinit(*irank);
}
void gpunb_open_(int *nbmax, int *irank){
GPUNB_open(*nbmax, *irank);
}
void gpunb_close_(){
GPUNB_close();
}
void gpunb_send_(
int *nj,
double mj[],
double xj[][3],
double vj[][3]){
GPUNB_send(*nj, mj, xj, vj);
}
void gpunb_regf_(
int *ni,
double h2[],
double dtr[],
double xi[][3],
double vi[][3],
double acc[][3],
double jrk[][3],
double pot[],
int *lmax,
int *nnbmax,
int *list){ // list[][lmax]
GPUNB_regf(*ni, h2, dtr, xi, vi, acc, jrk, pot, *lmax, *nnbmax, list);
}
}