#ifndef OPENCV_CUDA_EMULATION_HPP_
#define OPENCV_CUDA_EMULATION_HPP_

#include "common.hpp"
#include "warp_reduce.hpp"

namespace cv { namespace cuda { namespace device
{
    struct Emulation
    {
        static __device__ __forceinline__ int syncthreadsOr(int pred)
        {
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
            // __syncthreads_or is unavailable before Fermi (sm_20);
            // this branch is just a compilation stub
            return 0;
#else
            return __syncthreads_or(pred);
#endif
        }
        template<int CTA_SIZE>
        static __forceinline__ __device__ int Ballot(int predicate)
        {
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
            return __ballot(predicate);
#else
            // Pre-Fermi emulation: each thread publishes its predicate as a
            // one-hot bit in shared memory; a warp-level sum reduction then
            // reconstructs the ballot mask (the bits are distinct, so
            // addition behaves like OR)
            __shared__ volatile int cta_buffer[CTA_SIZE];

            int tid = threadIdx.x;
            cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0;
            return warp_reduce(cta_buffer);
#endif
        }
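        // Usage sketch (illustrative only, not part of the original header):
        // within a single warp, Ballot yields a bitmask with one bit set per
        // lane whose predicate was non-zero. A hypothetical kernel counting
        // matching lanes might look like:
        //
        //     __global__ void countMatches(const int* data, int* out, int threshold)
        //     {
        //         int mask = Emulation::Ballot<32>(data[threadIdx.x] > threshold);
        //         if (threadIdx.x == 0)
        //             *out = __popc(mask); // population count of set bits
        //     }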
        struct smem
        {
            // Low 27 bits hold the stored value; the top 5 bits hold a
            // per-thread tag used by the pre-sm_12 emulation below
            enum { TAG_MASK = (1U << ( (sizeof(unsigned int) << 3) - 5U)) - 1U };

            template<typename T>
            static __device__ __forceinline__ T atomicInc(T* address, T val)
            {
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
                // Shared-memory atomics require sm_12. Emulate: tag the word
                // with this thread's lane id and retry until this thread's
                // write is the one that sticks. Note that, unlike ::atomicInc,
                // the emulated path ignores the wrap-around value `val`.
                T count;
                unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);
                do
                {
                    count = *address & TAG_MASK;
                    count = tag | (count + 1);
                    *address = count;
                } while (*address != count);

                return (count & TAG_MASK) - 1;
#else
                return ::atomicInc(address, val);
#endif
            }
            template<typename T>
            static __device__ __forceinline__ T atomicAdd(T* address, T val)
            {
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
                // Same tag-and-retry scheme as atomicInc above
                T count;
                unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);
                do
                {
                    count = *address & TAG_MASK;
                    count = tag | (count + val);
                    *address = count;
                } while (*address != count);

                return (count & TAG_MASK) - val;
#else
                return ::atomicAdd(address, val);
#endif
            }
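            // How the tag trick works (explanatory note, not original code):
            // every colliding thread writes (tag | new_count) to the same
            // shared word, but only one write wins. The loop exits only for
            // the thread whose tagged value survived the race, so updates
            // serialize one per retry round. Worked example: if the counter
            // holds 7 and lane 3 wins the increment, the word becomes
            // (3 << 27) | 8, and lane 3 returns (stored & TAG_MASK) - 1 == 7,
            // matching the pre-increment semantics of ::atomicInc.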
            template<typename T>
            static __device__ __forceinline__ T atomicMin(T* address, T val)
            {
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
                // Best-effort emulation: keep re-writing the candidate
                // minimum until no smaller value remains at the address
                T count = ::min(*address, val);
                do
                {
                    *address = count;
                } while (*address > count);

                return count;
#else
                return ::atomicMin(address, val);
#endif
            }
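            // Caveat (annotation, not original code): unlike ::atomicMin,
            // which returns the value previously stored at the address, the
            // emulated path returns the resulting minimum, and its
            // read-modify-write is not atomic, so concurrent writers can
            // interleave. Callers targeting pre-sm_12 devices should not
            // rely on the return value.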
        };

        struct glob
        {
            static __device__ __forceinline__ int atomicAdd(int* address, int val)
            {
                return ::atomicAdd(address, val);
            }
            static __device__ __forceinline__ unsigned int atomicAdd(unsigned int* address, unsigned int val)
            {
                return ::atomicAdd(address, val);
            }
            static __device__ __forceinline__ float atomicAdd(float* address, float val)
            {
#if __CUDA_ARCH__ >= 200
                return ::atomicAdd(address, val);
#else
                // Native float atomicAdd on global memory requires sm_20;
                // emulate it with a compare-and-swap loop over the bit pattern
                int* address_as_i = (int*) address;
                int old = *address_as_i, assumed;
                do {
                    assumed = old;
                    old = ::atomicCAS(address_as_i, assumed,
                        __float_as_int(val + __int_as_float(assumed)));
                } while (assumed != old);
                return __int_as_float(old);
#endif
            }
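            // The CAS loop above is the standard pattern for synthesizing an
            // atomic read-modify-write the hardware lacks: read the word,
            // compute the new value, and publish it only if the word still
            // holds what was read, retrying otherwise with the fresh value.
            // The same shape is reused below for the double atomicAdd and for
            // the float/double atomicMin/atomicMax overloads.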
            static __device__ __forceinline__ double atomicAdd(double* address, double val)
            {
#if __CUDA_ARCH__ >= 130
                unsigned long long int* address_as_ull = (unsigned long long int*) address;
                unsigned long long int old = *address_as_ull, assumed;
                do {
                    assumed = old;
                    old = ::atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
                } while (assumed != old);
                return __longlong_as_double(old);
#else
                // Double precision and 64-bit atomicCAS require sm_13;
                // silently do nothing on older devices
                (void) address;
                (void) val;
                return 0.0;
#endif
            }
            static __device__ __forceinline__ int atomicMin(int* address, int val)
            {
                return ::atomicMin(address, val);
            }
            static __device__ __forceinline__ float atomicMin(float* address, float val)
            {
#if __CUDA_ARCH__ >= 120
                // No native float atomicMin: emulate via atomicCAS on the bit pattern
                int* address_as_i = (int*) address;
                int old = *address_as_i, assumed;
                do {
                    assumed = old;
                    old = ::atomicCAS(address_as_i, assumed,
                        __float_as_int(::fminf(val, __int_as_float(assumed))));
                } while (assumed != old);
                return __int_as_float(old);
#else
                (void) address;
                (void) val;
                return 0.0f;
#endif
            }
            static __device__ __forceinline__ double atomicMin(double* address, double val)
            {
#if __CUDA_ARCH__ >= 130
                unsigned long long int* address_as_ull = (unsigned long long int*) address;
                unsigned long long int old = *address_as_ull, assumed;
                do {
                    assumed = old;
                    old = ::atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
                } while (assumed != old);
                return __longlong_as_double(old);
#else
                (void) address;
                (void) val;
                return 0.0;
#endif
            }
            static __device__ __forceinline__ int atomicMax(int* address, int val)
            {
                return ::atomicMax(address, val);
            }
            static __device__ __forceinline__ float atomicMax(float* address, float val)
            {
#if __CUDA_ARCH__ >= 120
                // No native float atomicMax: same CAS pattern as atomicMin above
                int* address_as_i = (int*) address;
                int old = *address_as_i, assumed;
                do {
                    assumed = old;
                    old = ::atomicCAS(address_as_i, assumed,
                        __float_as_int(::fmaxf(val, __int_as_float(assumed))));
                } while (assumed != old);
                return __int_as_float(old);
#else
                (void) address;
                (void) val;
                return 0.0f;
#endif
            }
            static __device__ __forceinline__ double atomicMax(double* address, double val)
            {
#if __CUDA_ARCH__ >= 130
                unsigned long long int* address_as_ull = (unsigned long long int*) address;
                unsigned long long int old = *address_as_ull, assumed;
                do {
                    assumed = old;
                    old = ::atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
                } while (assumed != old);
                return __longlong_as_double(old);
#else
                (void) address;
                (void) val;
                return 0.0;
#endif
            }
        };
    };
}}} // namespace cv { namespace cuda { namespace device
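// Usage sketch (illustrative only; the kernel and buffer names below are
// hypothetical): the Emulation wrappers are drop-in replacements for the
// corresponding CUDA intrinsics, falling back to an emulated path at compile
// time on architectures that lack the native instruction. For example, a
// per-block histogram might use the shared-memory wrapper:
//
//     __global__ void histKernel(const unsigned char* src, unsigned int* hist, int n)
//     {
//         __shared__ unsigned int smemHist[256];
//         // ... zero smemHist, __syncthreads() ...
//         for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
//             cv::cuda::device::Emulation::smem::atomicAdd(&smemHist[src[i]], 1u);
//         // ... __syncthreads(), then flush smemHist to hist with
//         //     cv::cuda::device::Emulation::glob::atomicAdd ...
//     }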
#endif // OPENCV_CUDA_EMULATION_HPP_