How to implement common deep learning activation kernels with CUDA in C++

    This post first appeared on my personal blog at https://kezunlin.me/post/ee123cac/, where the latest version is maintained.


    Guide
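
    The post builds two small pieces: a set of CUDA utility functions (error checking, grid sizing, and device-memory helpers) in cuda.h/cuda.cpp, and the activation kernels themselves in activations.h/activation_kernels.cu.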

    cuda utils

    cuda.h

    #ifndef CUDA_H_
    #define CUDA_H_
    
    #include "cuda_runtime.h"
    #include "curand.h"
    #include "cublas_v2.h"
    
    #include <stddef.h> // size_t
    
    // threads per block for every kernel launch in this post
    #define BLOCK 512
    
    // abort with a readable message if a CUDA runtime call failed
    void check_error(cudaError_t status);
    
    // map n elements onto a (possibly 2-D) grid of BLOCK-sized blocks
    dim3 cuda_gridsize(size_t n);
    
    // allocate n floats on the device; copy from x if non-NULL, else zero-fill
    float* cuda_make_array(float* x, size_t n);
    
    void cuda_free(float* x_gpu);
    
    // host -> device copy of n floats
    void cuda_push_array(float* x_gpu, float* x, size_t n);
    
    // device -> host copy of n floats
    void cuda_pull_array(float* x_gpu, float* x, size_t n);
    
    #endif // CUDA_H_
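
    cuda.h declares thin wrappers around the CUDA runtime: check_error turns failed calls into readable aborts, cuda_gridsize maps an element count to a launch grid, and cuda_make_array, cuda_push_array, cuda_pull_array, and cuda_free manage device buffers. BLOCK fixes the thread-block size at 512 for every kernel launch below.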
    
    

    cuda.cpp

    #include "cuda.h"
    #include "blas.h"
    
    #include <assert.h>
    #include <stdlib.h>
    #include <stdio.h>
    #include <math.h>   // ceil, sqrt
    
    void error(const char* s)
    {
        perror(s);
        assert(0);
        exit(-1);
    }
    
    void check_error(cudaError_t status)
    {
        //cudaDeviceSynchronize();
        // also pick up errors left over from earlier asynchronous launches
        cudaError_t status2 = cudaGetLastError();
        if (status != cudaSuccess)
        {
            const char *s = cudaGetErrorString(status);
            char buffer[256];
            printf("CUDA Error: %s\n", s);
            snprintf(buffer, 256, "CUDA Error: %s", s);
            error(buffer);
        }
        if (status2 != cudaSuccess)
        {
            const char *s = cudaGetErrorString(status2);
            char buffer[256];
            printf("CUDA Error Prev: %s\n", s);
            snprintf(buffer, 256, "CUDA Error Prev: %s", s);
            error(buffer);
        }
    }
    
    // gridDim.x is capped at 65535 blocks, so for large n the blocks are
    // spread over a 2-D grid; kernels recover a linear block index from
    // blockIdx.x + blockIdx.y * gridDim.x.
    dim3 cuda_gridsize(size_t n)
    {
        size_t k = (n-1) / BLOCK + 1;
        size_t x = k;
        size_t y = 1;
        if(x > 65535){
            x = (size_t)ceil(sqrt((double)k));
            y = (n-1)/(x*BLOCK) + 1;
        }
        // explicit casts: brace-initializing dim3 from size_t is a narrowing
        // error in C++
        dim3 d((unsigned int)x, (unsigned int)y, 1);
        //printf("%zu %zu %zu %zu\n", n, x, y, x*y*BLOCK);
        return d;
    }
    
    float* cuda_make_array(float* x, size_t n)
    {
        float *x_gpu;
        size_t size = sizeof(float)*n;
        cudaError_t status = cudaMalloc((void **)&x_gpu, size);
        check_error(status);
        if(x){
            // initialize the device buffer from the host buffer
            status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
            check_error(status);
        } else {
            // no host data: zero-fill on the device (fill_gpu is declared in blas.h)
            fill_gpu(n, 0, x_gpu, 1);
        }
        if(!x_gpu) error("Cuda malloc failed\n");
        return x_gpu;
    }
    
    void cuda_free(float* x_gpu)
    {
        cudaError_t status = cudaFree(x_gpu);
        check_error(status);
    }
    
    void cuda_push_array(float *x_gpu, float* x, size_t n)
    {
        size_t size = sizeof(float)*n;
        cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
        check_error(status);
    }
    
    void cuda_pull_array(float *x_gpu, float* x, size_t n)
    {
        size_t size = sizeof(float)*n;
        cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
        check_error(status);
    }
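
    One dependency deserves a note: cuda_make_array zero-fills freshly allocated buffers through fill_gpu, which is declared in blas.h but not shown in this post. A minimal sketch consistent with the call site fill_gpu(n, 0, x_gpu, 1) (count, value, buffer, stride) might look like the following; the exact signature here is an assumption:

    #include "cuda.h"
    
    // hypothetical sketch of the fill_gpu declared in blas.h
    __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
    {
        int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
        if(i < N) X[i*INCX] = ALPHA; // strided in-place fill
    }
    
    void fill_gpu(int N, float ALPHA, float *X, int INCX)
    {
        fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
        check_error(cudaPeekAtLastError());
    }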
    
    

    activation kernels

    activations.h

    #ifndef ACTIVATIONS_H_
    #define ACTIVATIONS_H_
    
    typedef enum{
        LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, 
        LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
    } ACTIVATION;
    
    // apply activation a to x[0..n-1] in place on the device
    void activate_array_gpu(float* x, int n, ACTIVATION a);
    
    #endif // ACTIVATIONS_H_
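
    The ACTIVATION enum is the whole public surface: each value is dispatched to the matching __device__ function by the switch inside activate_kernel below, so adding a new activation means adding an enum value, a one-line device function, and a case.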
    

    activation_kernels.cu

    #include "activations.h"
    #include "cuda.h"
    #include "blas.h"
    
    // "leaky hard tanh": identity on [0, 1], slope .001 outside
    __device__ float lhtan_activate_kernel(float x)
    {
        if(x < 0) return .001f*x;
        if(x > 1) return .001f*(x-1.f) + 1.f;
        return x;
    }
    
    // clamp to [-1, 1]
    __device__ float hardtan_activate_kernel(float x)
    {
        if (x < -1) return -1;
        if (x > 1) return 1;
        return x;
    }
    
    __device__ float linear_activate_kernel(float x){return x;}
    __device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));} // sigmoid
    __device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;} // 2*sigmoid(x) - 1, range (-1, 1)
    __device__ float relu_activate_kernel(float x){return x*(x>0);} // max(0, x)
    __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);} // ELU with alpha = 1
    __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;} // leaky ReLU, slope .01
    __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;} // ReLU plus a small linear term
    __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;} // leaky ReLU, slope .1
    __device__ float tanh_activate_kernel(float x){return (2.f/(1 + expf(-2*x)) - 1);} // tanh via a single expf
    
    // piecewise-linear approximation of a sigmoid
    __device__ float plse_activate_kernel(float x)
    {
        if(x < -4) return .01f * (x + 4);
        if(x > 4)  return .01f * (x - 4) + 1;
        return .125f*x + .5f;
    }
    
    // staircase: alternates flat and unit-slope segments
    __device__ float stair_activate_kernel(float x)
    {
        int n = floorf(x);
        if (n%2 == 0) return floorf(x/2);
        else return (x - n) + floorf(x/2);
    }
    
    __device__ float activate_kernel(float x, ACTIVATION a)
    {
        switch(a){
            case LINEAR:
                return linear_activate_kernel(x);
            case LOGISTIC:
                return logistic_activate_kernel(x);
            case LOGGY:
                return loggy_activate_kernel(x);
            case RELU:
                return relu_activate_kernel(x);
            case ELU:
                return elu_activate_kernel(x);
            case RELIE:
                return relie_activate_kernel(x);
            case RAMP:
                return ramp_activate_kernel(x);
            case LEAKY:
                return leaky_activate_kernel(x);
            case TANH:
                return tanh_activate_kernel(x);
            case PLSE:
                return plse_activate_kernel(x);
            case STAIR:
                return stair_activate_kernel(x);
            case HARDTAN:
                return hardtan_activate_kernel(x);
            case LHTAN:
                return lhtan_activate_kernel(x);
        }
        return 0;
    }
    
    __global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
    {
        // linear index recovered from the (possibly 2-D) grid built by cuda_gridsize
        int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
        if(i < n) x[i] = activate_kernel(x[i], a);
    }
    
    void activate_array_gpu(float *x, int n, ACTIVATION a)
    {
        activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a);
        check_error(cudaPeekAtLastError()); // report launch-configuration errors immediately
    }
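
    To see the kernels in action, here is a minimal test driver (a hypothetical main.cpp; the array contents and the LEAKY choice are just for illustration) that pushes a small array to the GPU, activates it in place, and pulls the result back:

    #include <stdio.h>
    #include "cuda.h"
    #include "activations.h"
    
    int main()
    {
        float x[4] = {-2.f, -0.5f, 0.5f, 2.f};
    
        float* x_gpu = cuda_make_array(x, 4);  // allocate + host-to-device copy
        activate_array_gpu(x_gpu, 4, LEAKY);   // in-place leaky ReLU, slope .1
        cuda_pull_array(x_gpu, x, 4);          // device-to-host copy
    
        for (int i = 0; i < 4; ++i) printf("%f\n", x[i]);
        // expected: -0.200000 -0.050000 0.500000 2.000000
    
        cuda_free(x_gpu);
        return 0;
    }

    Something like nvcc cuda.cpp activation_kernels.cu main.cpp -o test_activations should build it, assuming a blas.h providing fill_gpu (such as the sketch above) is compiled in as well.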
    
    


    History

    • 20191014: created.
