SAUCE
Home
Events
Listing
Future
Previous
Accelerated Computing with GPUs 2020
Data Mining - Winter 20/21
High Performance Computing 2019
Einführung in die Bioinformatik WS19/20
Computational Logic
Parallel Algorithms and Architectures 2019
DSEA Praktikum 2018/19
Deep Learning 2018
High Performance Computing 2018
Parallel Algorithms and Architectures 2018
Datenstrukturen und effiziente Algorithmen WS 18/19
EiP SoSe 18
bio-st-18
EiP WS 2017/18
High Performance Computing 2017
Datenstrukturen und effiziente Algorithmen WiSe 17/18
PS SS 2017
Einfuehrung in die Programmierung SS17
Parallel Algorithms and Architectures 2017
High Performance Computing 2016
DSEA 2016/17
EiP WS2016/17
Parallel Algorithms and Architectures 2016
PS SS 2016
Krypto SS 2016
EiP SS 2016
DSEA Praktikum WS 2015/16
DSEA WS 2015/16
News
Documentation
About
Changelog
Roadmap
Deutsche Dokumentation
Tips and Tricks
Test configuration
Language information
Contact
Login
Parallel Algorithms and Architectures 2018
Sheet 6 (Warp Intrinsics)
Interactive Programming Exercises in the Lecture
Sheet 3
Sheet 4 (Array Reversal, Determinants)
Sheet 5 (Knapsack)
Sheet 6 (Warp Intrinsics)
Sheet 7 (Sparse Matrices, Page Rank)
Sheet 8 (Streams, Multi-GPU)
Sheet 9 (Jacobi Iteration)
Task 2 (Aggregated Atomics)
Task 1 (Segmented Prefix Scan)
Task 2 (Aggregated Atomics)
Task 2 (Aggregated Atomics)
Assignment
Scaffold Head
#include <iostream>
#include <cstdint>
#include <assert.h>

///////////////////////////////////////////////////////////////////////////////
// IGNORE THESE HELPERS (taken from https://github.com/gravitino/cudahelpers)
///////////////////////////////////////////////////////////////////////////////

// safe division: ceiling of x/y for non-negative integral x, positive y
#define SDIV(x,y)(((x)+(y)-1)/(y))

// error macro: print and abort on any pending CUDA error; place after every
// CUDA API call and kernel launch (launch errors only surface this way)
#define CUERR { \
    cudaError_t err; \
    if ((err = cudaGetLastError()) != cudaSuccess) { \
        std::cout << "CUDA error: " << cudaGetErrorString(err) << " : " \
                  << __FILE__ << ", line " << __LINE__ << std::endl; \
        exit(1); \
    } \
}

// convenient timers built on CUDA events; TIMERSTOP synchronizes on the
// stop event, so the reported milliseconds cover all preceding GPU work
#define TIMERSTART(label) \
    cudaEvent_t start##label, stop##label; \
    float time##label; \
    cudaEventCreate(&start##label); \
    cudaEventCreate(&stop##label); \
    cudaEventRecord(start##label, 0);

#define TIMERSTOP(label) \
    cudaEventRecord(stop##label, 0); \
    cudaEventSynchronize(stop##label); \
    cudaEventElapsedTime(&time##label, start##label, stop##label); \
    std::cout << "#" << time##label \
              << " ms (" << #label << ")" << std::endl;

#define BIG_CONSTANT(x) (x##LLU)

// 64-bit finalizer of MurmurHash3: a bijective bit mixer, used here as a
// cheap deterministic pseudo-random number generator over the index space
struct fmix64 {
    __device__ __host__ __forceinline__
    uint64_t operator() (uint64_t k) const {
        k ^= k >> 33;
        k *= BIG_CONSTANT(0xff51afd7ed558ccd);
        k ^= k >> 33;
        k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
        k ^= k >> 33;
        return k;
    }
};

// predicate functor: true iff the least significant bit of x is set
struct is_odd {
    template <typename value_t>
    __device__ __host__ __forceinline__
    bool operator()(const value_t& x) const {
        return x & 1;
    }
};

// fill Data[0..n) with function(index) using a grid-stride loop, so any
// launch configuration (even <<<1,1>>>) covers all n entries
template <
    typename value_t,
    typename index_t,
    typename funct_t>
__global__
void fill_random(
    value_t * Data,     // device array of length n (output)
    index_t n,          // number of entries to write
    funct_t function) { // index -> value mapping (device-callable)

    // FIX: widen before multiplying — blockDim.x*blockIdx.x is evaluated
    // in 32-bit unsigned arithmetic and silently overflows for huge grids
    const uint64_t thid   = uint64_t(blockDim.x)*blockIdx.x+threadIdx.x;
    const uint64_t stride = uint64_t(blockDim.x)*gridDim.x;

    for (uint64_t index = thid; index < n; index += stride)
        Data[index] = function(index);
}

// atomically increment *Counter by one; returns the pre-increment value,
// i.e. a unique slot index for the calling thread
__device__ uint64_t atomic_inc(uint64_t * Counter) {
    // atomicAdd has no uint64_t overload; unsigned long long int is
    // guaranteed to be 64 bits wide on all CUDA platforms
    auto ptr = (unsigned long long int*) Counter;
    return atomicAdd(ptr, 1);
}

// stream compaction (baseline): copy every entry satisfying the predicate
// into Filter, in arbitrary order; issues one global atomic per selected
// entry, so all selecting threads contend on the single Counter address
template <
    typename value_t,
    typename index_t,
    typename count_t,
    typename funct_t>
__global__
void naive_compact(
    value_t * Data,     // device input array of length n
    value_t * Filter,   // device output array (capacity >= #selected)
    count_t * Counter,  // device counter, must be zeroed before launch
    index_t n,          // number of input entries
    funct_t function) { // selection predicate (device-callable)

    // FIX: same 32-bit overflow hazard as in fill_random
    const uint64_t thid   = uint64_t(blockDim.x)*blockIdx.x+threadIdx.x;
    const uint64_t stride = uint64_t(blockDim.x)*gridDim.x;

    for (uint64_t index = thid; index < n; index += stride) {
        const value_t entry = Data[index];
        if (function(entry))
            Filter[atomic_inc(Counter)] = entry;
    }
}
Scaffold Foot
// FIX: atomic_aggregated_inc was called below but never defined anywhere in
// this file — the program could not link. Implementation: the classic
// warp-aggregated atomics pattern. All lanes of a warp that are currently
// active (i.e. passed the surrounding predicate) elect a leader, which
// reserves one contiguous range of slots with a single atomicAdd; every
// participant then receives its own unique slot. This reduces contention on
// Counter by up to 32x compared to one atomic per thread.
// Requires CUDA 9+ (*_sync warp intrinsics); correct under independent
// thread scheduling because each converged subgroup aggregates on its own.
__device__ uint64_t atomic_aggregated_inc(uint64_t * Counter) {

    const uint32_t mask   = __activemask();     // lanes on this code path
    const uint32_t lane   = threadIdx.x & 31;   // this thread's lane id
    const uint32_t leader = __ffs(mask)-1;      // lowest active lane
    // number of active lanes below this one = offset within the group
    const uint32_t rank   = __popc(mask & ((1u << lane)-1));

    unsigned long long int prev = 0;
    if (lane == leader)
        // one atomic reserves __popc(mask) slots for the whole group
        prev = atomicAdd((unsigned long long int*) Counter, __popc(mask));

    // broadcast the group's base slot from the leader to all active lanes
    prev = __shfl_sync(mask, prev, leader);

    return prev + rank;
}

// stream compaction, identical semantics to naive_compact, but using the
// warp-aggregated counter increment to minimize atomic contention
template <
    typename value_t,
    typename index_t,
    typename count_t,
    typename funct_t>
__global__
void aggregated_compact(
    value_t * Data,     // device input array of length n
    value_t * Filter,   // device output array (capacity >= #selected)
    count_t * Counter,  // device counter, must be zeroed before launch
    index_t n,          // number of input entries
    funct_t function) { // selection predicate (device-callable)

    // widen before multiplying to avoid 32-bit overflow for huge grids
    const uint64_t thid   = uint64_t(blockDim.x)*blockIdx.x+threadIdx.x;
    const uint64_t stride = uint64_t(blockDim.x)*gridDim.x;

    for (uint64_t index = thid; index < n; index += stride) {
        const value_t entry = Data[index];
        if (function(entry))
            Filter[atomic_aggregated_inc(Counter)] = entry;
    }
}

// benchmark driver: fill 2^28 pseudo-random 64-bit values, compact the odd
// ones with the naive and the aggregated kernel, verify both results
int main () {

    const uint64_t n = 1UL << 28;
    typedef uint64_t value_t;
    typedef uint64_t count_t;

    // uppercase = device pointers, lowercase = pinned host mirrors
    value_t * Data = nullptr, * Filter = nullptr, * filter = nullptr;
    uint64_t * Counter = nullptr, * counter = nullptr;

    cudaMalloc    (&Counter, sizeof(count_t));   CUERR
    cudaMallocHost(&counter, sizeof(count_t));   CUERR
    cudaMalloc    (&Data,    sizeof(value_t)*n); CUERR
    cudaMalloc    (&Filter,  sizeof(value_t)*n); CUERR
    cudaMallocHost(&filter,  sizeof(value_t)*n); CUERR

    TIMERSTART(fill_random)
    fill_random<<<1024, 1024>>>(Data, n, fmix64()); CUERR
    TIMERSTOP(fill_random)

    auto predicate = is_odd();

    TIMERSTART(naive_reset)
    cudaMemset(Counter, 0, sizeof(count_t));   CUERR
    cudaMemset(Filter,  0, sizeof(value_t)*n); CUERR
    TIMERSTOP(naive_reset)

    TIMERSTART(naive_compact)
    naive_compact<<<1024, 1024>>>(Data, Filter, Counter, n, predicate); CUERR
    TIMERSTOP(naive_compact)

    TIMERSTART(naive_check)
    cudaMemcpy(counter, Counter, sizeof(count_t),
               cudaMemcpyDeviceToHost); CUERR
    cudaMemcpy(filter, Filter, sizeof(value_t)*(*counter),
               cudaMemcpyDeviceToHost); CUERR
    for (uint64_t i = 0; i < *counter; i++) {
        if (!predicate(filter[i])) {
            std::cout << "error at position " << i << std::endl;
            break;
        }
    }
    TIMERSTOP(naive_check)

    // now again in fast
    TIMERSTART(aggregated_reset)
    cudaMemset(Counter, 0, sizeof(count_t));   CUERR
    cudaMemset(Filter,  0, sizeof(value_t)*n); CUERR
    TIMERSTOP(aggregated_reset)

    TIMERSTART(aggregated_compact)
    aggregated_compact<<<1024, 1024>>>
        (Data, Filter, Counter, n, predicate); CUERR
    TIMERSTOP(aggregated_compact)

    TIMERSTART(aggregated_check)
    // FIX: re-read the counter from the device — it was reset and rewritten
    // by the aggregated run; the original code reused the stale host value
    // from the naive run, which only worked because both runs happen to
    // select the same number of entries
    cudaMemcpy(counter, Counter, sizeof(count_t),
               cudaMemcpyDeviceToHost); CUERR
    cudaMemcpy(filter, Filter, sizeof(value_t)*(*counter),
               cudaMemcpyDeviceToHost); CUERR
    for (uint64_t i = 0; i < *counter; i++) {
        if (!predicate(filter[i])) {
            std::cout << "error at position " << i << std::endl;
            break;
        }
    }
    TIMERSTOP(aggregated_check)

    cudaFree(Counter);    CUERR
    cudaFreeHost(counter);CUERR
    cudaFree(Data);       CUERR
    cudaFree(Filter);     CUERR
    cudaFreeHost(filter); CUERR

    std::cout << "CUDA programming is fun!" << std::endl;
}
Start time:
Do 12 Apr 2018 16:11:00
End time:
Fr 01 Mär 2019 12:00:00
General test timeout:
10.0 seconds
Tests
Comment prefix
#
Given input
Expected output
CUDA programming is fun!