DiFfRG
integrator_3Dpx0_gpu.hh
#pragma once

#ifdef __CUDACC__

// standard library
#include <future>

// external libraries
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <thrust/reduce.h>

// DiFfRG

namespace DiFfRG
{
  template <typename ctype, typename NT, typename KERNEL, typename... T>
  __global__ void gridreduce_3dpx0(NT *dest, const ctype *x_quadrature_p, const ctype *x_quadrature_w,
                                   const ctype *ang_quadrature_p, const ctype *ang_quadrature_w,
                                   const ctype *x0_quadrature_p, const ctype *x0_quadrature_w, const ctype x_extent,
                                   const ctype x0_extent, const ctype k, T... t)
  {
    uint len_x = gridDim.x * blockDim.x;
    uint len_y = gridDim.y * blockDim.y;
    uint idx_x = (blockIdx.x * blockDim.x) + threadIdx.x;
    uint idx_y = (blockIdx.y * blockDim.y) + threadIdx.y;
    uint idx_z = (blockIdx.z * blockDim.z) + threadIdx.z;
    uint idx = idx_z * len_x * len_y + idx_y * len_x + idx_x;

    const ctype cos = 2 * (ang_quadrature_p[idx_y] - (ctype)0.5);
    const ctype phi = 2 * (ctype)M_PI * ang_quadrature_p[idx_z / len_y];
    const ctype q0 = k * x0_quadrature_p[idx_z % len_y] * x0_extent;
    const ctype weight = x0_quadrature_w[idx_z % len_y] * x0_extent * 2 * (ctype)M_PI *
                         ang_quadrature_w[idx_z / len_y] * 2 * ang_quadrature_w[idx_y] * x_quadrature_w[idx_x] *
                         x_extent;
    const ctype q = k * sqrt(x_quadrature_p[idx_x] * x_extent);
    const ctype int_element = (powr<1>(q) / (ctype)2 * powr<2>(k)) // x = p^2 / k^2 integral
                              * (k)                                // x0 = q0 / k integral
                              / powr<4>(2 * (ctype)M_PI);          // fourier factor

    NT res = KERNEL::kernel(q, cos, phi, q0, k, t...) + KERNEL::kernel(q, cos, phi, -q0, k, t...);

    dest[idx] = int_element * res * weight;
  }

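  // What the kernel above computes: each grid point contributes
  //   weight * int_element * [ K(q, cos, phi, q0) + K(q, cos, phi, -q0) ],
  // which discretizes the integral  int dq0 d^3q / (2*pi)^4  K(q, cos, phi, q0).
  // The substitution x = q^2 / k^2 gives q^2 dq = (q * k^2 / 2) dx, the substitution
  // x0 = q0 / k gives dq0 = k dx0, and cos, phi are mapped from unit-interval quadrature
  // nodes; evaluating K at +q0 and -q0 covers the negative-frequency half of the q0 axis.
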
  template <typename NT, typename KERNEL> class Integrator3Dpx0GPU
  {
  public:
    /**
     * @brief Numerical type to be used for integration tasks e.g. the argument or possible jacobians.
     */
    using ctype = typename get_type::ctype<NT>;

    Integrator3Dpx0GPU(QuadratureProvider &quadrature_provider, const std::array<uint, 4> grid_sizes,
                       const ctype x_extent, const ctype x0_extent, const JSONValue &json)
        : Integrator3Dpx0GPU(quadrature_provider, grid_sizes, x_extent, x0_extent,
                             json.get_uint("/integration/cudathreadsperblock"))
    {
    }

    Integrator3Dpx0GPU(QuadratureProvider &quadrature_provider, std::array<uint, 4> grid_sizes, const ctype x_extent,
                       const ctype x0_extent, const uint max_block_size = 256)
        : grid_sizes(grid_sizes), x_extent(x_extent), x0_extent(x0_extent),
          device_data_size(grid_sizes[0] * grid_sizes[1] * grid_sizes[2] * grid_sizes[3]),
          pool(rmm::mr::get_current_device_resource(), (device_data_size / 256 + 1) * 256)
    {
      if (grid_sizes[2] != grid_sizes[1])
        throw std::runtime_error("Grid sizes must be currently equal for all angular dimensions!");

      // device-side quadrature nodes and weights for x = p^2/k^2, the two angles, and x0 = q0/k
      ptr_x_quadrature_p = quadrature_provider.get_device_points(grid_sizes[0]);
      ptr_x_quadrature_w = quadrature_provider.get_device_weights(grid_sizes[0]);
      ptr_ang_quadrature_p = quadrature_provider.get_device_points(grid_sizes[1]);
      ptr_ang_quadrature_w = quadrature_provider.get_device_weights(grid_sizes[1]);
      ptr_x0_quadrature_p = quadrature_provider.get_device_points(grid_sizes[3]);
      ptr_x0_quadrature_w = quadrature_provider.get_device_weights(grid_sizes[3]);

      block_sizes = {max_block_size, max_block_size, max_block_size};
      // choose block sizes such that the size is both as close to max_block_size as possible and the individual sizes
      // are as close to each other as possible
      uint optimize_dim = 2;
      while (block_sizes[0] * block_sizes[1] * block_sizes[2] > max_block_size || block_sizes[0] > grid_sizes[0] ||
             block_sizes[1] > grid_sizes[1] || block_sizes[2] > grid_sizes[2] * grid_sizes[3]) {
        if (block_sizes[optimize_dim] > 1) block_sizes[optimize_dim]--;
        while (grid_sizes[optimize_dim] % block_sizes[optimize_dim] != 0)
          block_sizes[optimize_dim]--;
        optimize_dim = (optimize_dim + 2) % 3;
      }
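      // For example (illustrative numbers, not taken from the source): with grid_sizes = {64, 16, 16, 32}
      // and max_block_size = 256, this loop terminates at block_sizes = {16, 4, 4}, i.e. 16*4*4 = 256
      // threads per block, which below gives num_blocks = (4, 4, 128) and threads_per_block = (16, 4, 4).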

      uint blocks1 = grid_sizes[0] / block_sizes[0];
      uint threads1 = block_sizes[0];
      uint blocks2 = grid_sizes[1] / block_sizes[1];
      uint threads2 = block_sizes[1];
      uint blocks3 = grid_sizes[2] * grid_sizes[3] / block_sizes[2];
      uint threads3 = block_sizes[2];

      num_blocks = dim3(blocks1, blocks2, blocks3);
      threads_per_block = dim3(threads1, threads2, threads3);
    }

    Integrator3Dpx0GPU(const Integrator3Dpx0GPU &other)
        : grid_sizes(other.grid_sizes), block_sizes(other.block_sizes), device_data_size(other.device_data_size),
          ptr_x_quadrature_p(other.ptr_x_quadrature_p), ptr_x_quadrature_w(other.ptr_x_quadrature_w),
          ptr_ang_quadrature_p(other.ptr_ang_quadrature_p), ptr_ang_quadrature_w(other.ptr_ang_quadrature_w),
          ptr_x0_quadrature_p(other.ptr_x0_quadrature_p), ptr_x0_quadrature_w(other.ptr_x0_quadrature_w),
          x_extent(other.x_extent), x0_extent(other.x0_extent), num_blocks(other.num_blocks),
          threads_per_block(other.threads_per_block),
          pool(rmm::mr::get_current_device_resource(), (device_data_size / 256 + 1) * 256)
    {
    }

    // Evaluate the integral at scale k (plus any extra kernel arguments); launches the grid
    // kernel on a pooled stream and reduces the result on the GPU (blocking).
    template <typename... T> NT get(const ctype k, const T &...t) const
    {
      const auto cuda_stream = cuda_stream_pool.get_stream();
      rmm::device_uvector<NT> device_data(device_data_size, cuda_stream, &pool);
      gridreduce_3dpx0<ctype, NT, KERNEL><<<num_blocks, threads_per_block, 0, cuda_stream.value()>>>(
          device_data.data(), ptr_x_quadrature_p, ptr_x_quadrature_w, ptr_ang_quadrature_p, ptr_ang_quadrature_w,
          ptr_x0_quadrature_p, ptr_x0_quadrature_w, x_extent, x0_extent, k, t...);
      check_cuda();
      return KERNEL::constant(k, t...) + thrust::reduce(thrust::cuda::par.on(cuda_stream.value()), device_data.begin(),
                                                        device_data.end(), NT(0.), thrust::plus<NT>());
    }

    /**
     * @brief Request a future for the integral of the kernel.
     */
    template <typename... T> std::future<NT> request(const ctype k, const T &...t) const
    {
      const auto cuda_stream = cuda_stream_pool.get_stream();
      std::shared_ptr<rmm::device_uvector<NT>> device_data =
          std::make_shared<rmm::device_uvector<NT>>(device_data_size, cuda_stream, &pool);
      gridreduce_3dpx0<ctype, NT, KERNEL><<<num_blocks, threads_per_block, 0, cuda_stream.value()>>>(
          device_data->data(), ptr_x_quadrature_p, ptr_x_quadrature_w, ptr_ang_quadrature_p, ptr_ang_quadrature_w,
          ptr_x0_quadrature_p, ptr_x0_quadrature_w, x_extent, x0_extent, k, t...);
      check_cuda();
      const NT constant = KERNEL::constant(k, t...);

      return std::async(std::launch::deferred, [=, this]() {
        return constant + thrust::reduce(thrust::cuda::par.on(cuda_stream.value()), (*device_data).begin(),
                                         (*device_data).end(), NT(0.), thrust::plus<NT>());
      });
    }
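
    // Note on request(): the returned future uses std::launch::deferred, so the thrust::reduce
    // runs lazily on the thread that first calls get()/wait() on the future; the CUDA kernel
    // itself has already been launched asynchronously on the pooled stream at this point.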

  private:
    const std::array<uint, 4> grid_sizes;
    std::array<uint, 3> block_sizes;

    uint device_data_size;

    const ctype *ptr_x_quadrature_p;
    const ctype *ptr_x_quadrature_w;
    const ctype *ptr_ang_quadrature_p;
    const ctype *ptr_ang_quadrature_w;
    const ctype *ptr_x0_quadrature_p;
    const ctype *ptr_x0_quadrature_w;

    const ctype x_extent;
    const ctype x0_extent;

    dim3 num_blocks;
    dim3 threads_per_block;

    using PoolMR = rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>;
    mutable PoolMR pool;
    const rmm::cuda_stream_pool cuda_stream_pool;
  };
} // namespace DiFfRG

#else

#ifdef USE_CUDA

#else

namespace DiFfRG
{
  // Without CUDA support, Integrator3Dpx0GPU falls back to the TBB (CPU) implementation.
  template <typename NT, typename KERNEL> class Integrator3Dpx0GPU : public Integrator3Dpx0TBB<NT, KERNEL>
  {
  public:
    using ctype = typename get_type::ctype<NT>;

    Integrator3Dpx0GPU(QuadratureProvider &quadrature_provider, std::array<uint, 4> grid_sizes, const ctype x_extent,
                       const ctype x0_extent, const uint max_block_size = 256)
        : Integrator3Dpx0TBB<NT, KERNEL>(quadrature_provider, grid_sizes, x_extent, x0_extent)
    {
    }

    Integrator3Dpx0GPU(QuadratureProvider &quadrature_provider, const std::array<uint, 4> grid_sizes,
                       const ctype x_extent, const ctype x0_extent, const JSONValue &)
        : Integrator3Dpx0TBB<NT, KERNEL>(quadrature_provider, grid_sizes, x_extent, x0_extent)
    {
    }
  };
} // namespace DiFfRG

#endif

#endif
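
Usage sketch (illustrative, not part of the header): a kernel type supplies a static kernel() evaluated at every quadrature node and a static constant() added once to the reduced sum. The struct MyKernel, the chosen grid sizes and extents, and the way the QuadratureProvider is obtained are assumptions made for this example; only the Integrator3Dpx0GPU interface shown above is taken from this file, and the GPU path requires compilation with nvcc and the corresponding DiFfRG headers.

// illustrative only; assumes the relevant DiFfRG headers are included
using namespace DiFfRG;

struct MyKernel {
  // evaluated on the device at every quadrature node (q, cos(theta), phi, q0) at scale k
  static __device__ double kernel(const double q, const double cos, const double phi, const double q0,
                                  const double k)
  {
    return q * q / (q * q + q0 * q0 + k * k); // toy integrand
  }
  // added once to the reduced sum
  static double constant(const double /*k*/) { return 0.; }
};

void example(QuadratureProvider &quadrature_provider)
{
  // nodes for x = p^2/k^2, cos(theta), phi and x0 = q0/k; the two angular sizes must be equal
  const std::array<uint, 4> grid_sizes = {32, 8, 8, 16};
  const double x_extent = 4.;  // upper bound of x = p^2/k^2
  const double x0_extent = 4.; // upper bound of x0 = q0/k

  Integrator3Dpx0GPU<double, MyKernel> integrator(quadrature_provider, grid_sizes, x_extent, x0_extent);

  const double value = integrator.get(1.0);              // evaluate at k = 1 (blocking)
  std::future<double> result = integrator.request(1.0);  // or request a deferred future
  const double value2 = result.get();
}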