clock Study Notes
- 1. Complete Code
- 2. Kernel Flow
- 3. main Function Flow
- 4. Summary (declaring and using shared memory)
- 4.1 Example
- 4.2 Copying data from global memory to shared memory
This program uses CUDA's parallel computing capability to perform a reduction that finds the minimum value within each block, and it uses the clock() function to measure each block's execution time in raw clock ticks. The main function manages the CUDA environment and memory, then processes the timing data to evaluate the algorithm's performance.
1. Complete Code
// System includes
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// CUDA runtime
#include <cuda_runtime.h>

// helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>

// This kernel computes a standard parallel reduction and evaluates the
// time it takes to do that for each block. The timing results are stored
// in device memory.
__global__ static void timedReduction(const float *input, float *output,
                                      clock_t *timer) {
  // __shared__ float shared[2 * blockDim.x];
  extern __shared__ float shared[];

  const int tid = threadIdx.x;
  const int bid = blockIdx.x;

  if (tid == 0) timer[bid] = clock();

  // Copy input.
  shared[tid] = input[tid];
  shared[tid + blockDim.x] = input[tid + blockDim.x];

  // Perform reduction to find minimum.
  for (int d = blockDim.x; d > 0; d /= 2) {
    __syncthreads();

    if (tid < d) {
      float f0 = shared[tid];
      float f1 = shared[tid + d];

      if (f1 < f0) {
        shared[tid] = f1;
      }
    }
  }

  // Write result.
  if (tid == 0) output[bid] = shared[0];

  __syncthreads();

  if (tid == 0) timer[bid + gridDim.x] = clock();
}

#define NUM_BLOCKS 64
#define NUM_THREADS 256

// It's interesting to change the number of blocks and the number of threads to
// understand how to keep the hardware busy.
//
// Here are some numbers I get on my G80:
// blocks - clocks
// 1 - 3096
// 8 - 3232
// 16 - 3364
// 32 - 4615
// 64 - 9981
//
// With less than 16 blocks some of the multiprocessors of the device are idle.
// With more than 16 you are using all the multiprocessors, but there's only one
// block per multiprocessor and that doesn't allow you to hide the latency of
// the memory. With more than 32 the speed scales linearly.

// Start the main CUDA Sample here
int main(int argc, char **argv) {
  printf("CUDA Clock sample\n");

  // This will pick the best possible CUDA capable device
  int dev = findCudaDevice(argc, (const char **)argv);

  float *dinput = NULL;
  float *doutput = NULL;
  clock_t *dtimer = NULL;

  clock_t timer[NUM_BLOCKS * 2];
  float input[NUM_THREADS * 2];

  for (int i = 0; i < NUM_THREADS * 2; i++) {
    input[i] = (float)i;
  }

  checkCudaErrors(
      cudaMalloc((void **)&dinput, sizeof(float) * NUM_THREADS * 2));
  checkCudaErrors(cudaMalloc((void **)&doutput, sizeof(float) * NUM_BLOCKS));
  checkCudaErrors(
      cudaMalloc((void **)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2));

  checkCudaErrors(cudaMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2,
                             cudaMemcpyHostToDevice));

  timedReduction<<<NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 * NUM_THREADS>>>(
      dinput, doutput, dtimer);

  checkCudaErrors(cudaMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2,
                             cudaMemcpyDeviceToHost));

  checkCudaErrors(cudaFree(dinput));
  checkCudaErrors(cudaFree(doutput));
  checkCudaErrors(cudaFree(dtimer));

  long double avgElapsedClocks = 0;

  for (int i = 0; i < NUM_BLOCKS; i++) {
    avgElapsedClocks += (long double)(timer[i + NUM_BLOCKS] - timer[i]);
  }

  avgElapsedClocks = avgElapsedClocks / NUM_BLOCKS;
  printf("Average clocks/block = %Lf\n", avgElapsedClocks);

  return EXIT_SUCCESS;
}
2. Kernel Flow
The kernel timedReduction:
__global__ static void timedReduction(const float *input, float *output,
                                      clock_t *timer) {
  extern __shared__ float shared[];

  const int tid = threadIdx.x;  // thread index within the block
  const int bid = blockIdx.x;   // block index

  // Record each block's start time
  if (tid == 0) timer[bid] = clock();

  // Copy the input data into shared memory
  shared[tid] = input[tid];
  shared[tid + blockDim.x] = input[tid + blockDim.x];

  // Perform the reduction to find the minimum
  for (int d = blockDim.x; d > 0; d /= 2) {
    __syncthreads();

    if (tid < d) {
      float f0 = shared[tid];
      float f1 = shared[tid + d];

      if (f1 < f0) {
        shared[tid] = f1;
      }
    }
  }

  // Write the result to global memory
  if (tid == 0) output[bid] = shared[0];

  __syncthreads();

  // Record each block's end time
  if (tid == 0) timer[bid + gridDim.x] = clock();
}
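To see what the reduction loop does, here is a small host-side simulation of it (an illustrative sketch, not part of the sample): 4 "threads" reduce 8 elements, with d stepping through 4, 2, 1 exactly as in the kernel.

#include <stdio.h>

// Host simulation of the kernel's reduction loop (illustrative only).
// With blockDim.x = 4, "shared" holds 2 * 4 = 8 elements, and each pass
// folds the upper d elements onto the lower d by taking the minimum.
int main(void) {
  float shared[8] = {7, 3, 9, 1, 5, 8, 2, 6};
  for (int d = 4; d > 0; d /= 2) {
    for (int tid = 0; tid < d; tid++) {  // one iteration per active "thread"
      if (shared[tid + d] < shared[tid]) {
        shared[tid] = shared[tid + d];
      }
    }
  }
  printf("min = %f\n", shared[0]);  // prints 1.000000
  return 0;
}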
Syntax notes:
The kernel declares extern __shared__ float shared[], so the array's size is not fixed at compile time; it is supplied in bytes as the third parameter of the <<<...>>> launch configuration. Here each thread copies two elements (shared[tid] and shared[tid + blockDim.x], where blockDim.x is the number of threads in the block), so the launch must supply 2 * blockDim.x floats; that is also why the reduction loop starts at d = blockDim.x.
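By contrast with a statically sized array such as __shared__ float shared[256], a dynamically sized one gets its length from the launch. A minimal sketch of the pattern (the kernel and names here are illustrative, not from the sample):

// Illustrative kernel: the shared array has no size in the source;
// it is as large as the launch configuration says it is.
__global__ void copyThroughShared(const float *in, float *out) {
  extern __shared__ float buf[];  // sized at launch time
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  buf[threadIdx.x] = in[i];
  __syncthreads();
  out[i] = buf[threadIdx.x];
}

// The third <<<>>> parameter is the dynamic shared-memory size in bytes,
// here one float per thread. This is the same mechanism the sample uses
// with sizeof(float) * 2 * NUM_THREADS.
void launchCopy(const float *din, float *dout, int blocks, int threads) {
  copyThroughShared<<<blocks, threads, threads * sizeof(float)>>>(din, dout);
}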
3. main Function Flow
int main(int argc, char **argv) {
  // Initialize the CUDA device
  int dev = findCudaDevice(argc, (const char **)argv);

  // Host and device memory pointers
  float *dinput = NULL;
  float *doutput = NULL;
  clock_t *dtimer = NULL;

  clock_t timer[NUM_BLOCKS * 2];
  float input[NUM_THREADS * 2];

  // Initialize the input data
  for (int i = 0; i < NUM_THREADS * 2; i++) {
    input[i] = (float)i;
  }

  // Allocate device memory
  checkCudaErrors(
      cudaMalloc((void **)&dinput, sizeof(float) * NUM_THREADS * 2));
  checkCudaErrors(cudaMalloc((void **)&doutput, sizeof(float) * NUM_BLOCKS));
  checkCudaErrors(
      cudaMalloc((void **)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2));

  // Copy the input data from host to device
  checkCudaErrors(cudaMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2,
                             cudaMemcpyHostToDevice));

  // Launch the kernel
  timedReduction<<<NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 * NUM_THREADS>>>(
      dinput, doutput, dtimer);

  // Copy the timing data from device back to host
  checkCudaErrors(cudaMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2,
                             cudaMemcpyDeviceToHost));

  // Free the allocated memory
  checkCudaErrors(cudaFree(dinput));
  checkCudaErrors(cudaFree(doutput));
  checkCudaErrors(cudaFree(dtimer));

  // Compute the average number of clock cycles per block
  long double avgElapsedClocks = 0;

  for (int i = 0; i < NUM_BLOCKS; i++) {
    avgElapsedClocks += (long double)(timer[i + NUM_BLOCKS] - timer[i]);
  }

  avgElapsedClocks = avgElapsedClocks / NUM_BLOCKS;

  // Print the average clock cycles per block
  printf("Average clocks/block = %Lf\n", avgElapsedClocks);

  return EXIT_SUCCESS;
}
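Note that avgElapsedClocks is a raw tick count from the on-chip clock() counter, not a time in seconds. As a hedged sketch (this helper is not part of the sample), the tick count could be converted to milliseconds using the device clock rate, which cudaGetDeviceProperties reports in kHz, i.e. ticks per millisecond:

// Illustrative helper, not in the original sample: convert raw GPU
// clock ticks into milliseconds for the given device.
static long double clocksToMs(long double clocks, int device) {
  cudaDeviceProp prop;
  checkCudaErrors(cudaGetDeviceProperties(&prop, device));
  // clockRate is in kHz, i.e. clock ticks per millisecond.
  return clocks / (long double)prop.clockRate;
}

Called as clocksToMs(avgElapsedClocks, dev) just before main returns, this would turn the printed tick count into an approximate per-block time.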
4. Summary (declaring and using shared memory)
In CUDA programming, shared memory is a special memory type that is shared at the block level. Its main advantages are low latency and high bandwidth, which make it well suited to data that needs to be accessed quickly and read or written many times.
4.1 Example
__global__ void reductionKernel(const float *input, float *output) {
  // Assume each block has blockDim.x threads
  __shared__ float shared[256];  // shared array of 256 floats,
                                 // size fixed at compile time

  int tid = threadIdx.x;  // thread index within the block
  int bid = blockIdx.x;   // block index

  // Copy data from global memory into shared memory
  shared[tid] = input[bid * blockDim.x + tid];
  __syncthreads();  // make sure every thread has finished the copy

  // Reduction to find the minimum within the block
  for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
    if (tid < stride) {
      shared[tid] = min(shared[tid], shared[tid + stride]);
    }
    __syncthreads();  // make sure every thread has finished this pass
  }

  // Write the block's minimum back to global memory
  if (tid == 0) {
    output[bid] = shared[0];
  }
}
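Because the shared array here is statically sized to 256 floats, the block size at launch must match it. A minimal host-side sketch of launching this kernel (the wrapper, and its assumption that n is an exact multiple of 256, are illustrative, not from the original):

// Illustrative launcher for reductionKernel: one partial minimum per
// block is written to doutput, so doutput must hold n / 256 floats.
void runReduction(const float *dinput, float *doutput, int n) {
  const int threads = 256;         // must match the static shared[256]
  const int blocks = n / threads;  // assumes n is a multiple of 256
  reductionKernel<<<blocks, threads>>>(dinput, doutput);
  checkCudaErrors(cudaDeviceSynchronize());
}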
4.2 Copying data from global memory to shared memory
shared[tid] = input[bid * blockDim.x + tid];
Each thread uses its threadIdx.x to read one element from the global input array input and store it in the shared array shared; bid * blockDim.x + tid computes that thread's position in the input array.
- bid: the index of the block that the current thread belongs to. In CUDA, the built-in variable blockIdx.x gives the current block's index within the grid.
- blockDim.x: the number of threads in each block, also given by a built-in variable.

So bid * blockDim.x is the starting position of the current block's segment of the input array: each block is assigned a contiguous chunk of data of that size. For example, with blockDim.x = 256 threads per block, the first block (blockIdx.x = 0) handles input indices 0 through 255.
- tid: the index of the current thread within its block. In CUDA, the built-in variable threadIdx.x gives this local index.

So bid * blockDim.x + tid is the current thread's global index into the input array: bid * blockDim.x fixes where the block's segment starts, and tid selects the thread's element within that segment.
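This mapping can be sanity-checked on the host with plain C; the small sketch below (illustrative values, not from the sample) confirms that the (bid, tid) pairs cover the input contiguously and exactly once:

#include <assert.h>

// Host-side mirror of the kernel's index math: iterating blocks and
// threads in order produces the global indices 0, 1, 2, ... with no
// gaps and no overlaps.
int main(void) {
  const int blockDimX = 256, numBlocks = 4;
  int expected = 0;
  for (int bid = 0; bid < numBlocks; bid++) {
    for (int tid = 0; tid < blockDimX; tid++) {
      int globalIdx = bid * blockDimX + tid;  // e.g. bid = 2, tid = 5 -> 517
      assert(globalIdx == expected);
      expected++;
    }
  }
  return 0;
}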