DiFfRG
Loading...
Searching...
No Matches
integrator_angle_finiteTx0_gpu.hh
Go to the documentation of this file.
1#pragma once
2
3#ifdef __CUDACC__
4
5// standard library
6#include <future>
7
8// external libraries
9#include <rmm/cuda_stream_pool.hpp>
10#include <rmm/device_uvector.hpp>
11#include <rmm/mr/device/pool_memory_resource.hpp>
12#include <thrust/reduce.h>
13
14// DiFfRG
17
18namespace DiFfRG
19{
// Evaluates one integrand sample per thread on a 3d (x, angle, x0) grid and
// writes it, premultiplied by all quadrature weights and measure factors, to
// dest[idx]; a later reduction over dest yields the integral.
//
// Grid layout (no bounds guard: the launch configuration must tile the grid
// exactly):
//   x-direction: momentum quadrature, x = p^2/k^2 in [0, x_extent]
//   y-direction: angular quadrature node, mapped to cos(theta) in [-1, 1]
//   z-direction: the first x0_summands indices are discrete Matsubara
//                frequencies q0 = 2 pi T n; the remaining indices sample a
//                logarithmic q0 quadrature up to x0_extent.
template <typename ctype, int d, typename NT, typename KERNEL, typename... T>
__global__ void gridreduce_angle_finiteTx0(NT *dest, const ctype *x_quadrature_p, const ctype *x_quadrature_w,
                                           const ctype *ang_quadrature_p, const ctype *ang_quadrature_w,
                                           const ctype *x0_quadrature_p, const ctype *x0_quadrature_w,
                                           const ctype x_extent, const ctype x0_extent, const uint x0_summands,
                                           const ctype m_T, const ctype k, T... t)
{
  // Flat destination index; z is the slowest, x the fastest direction.
  const uint len_x = gridDim.x * blockDim.x;
  const uint len_y = gridDim.y * blockDim.y;
  const uint idx_x = blockIdx.x * blockDim.x + threadIdx.x;
  const uint idx_y = blockIdx.y * blockDim.y + threadIdx.y;
  const uint idx_z = blockIdx.z * blockDim.z + threadIdx.z;
  const uint idx = (idx_z * len_y + idx_y) * len_x + idx_x;

  // Physical momentum from the x = p^2/k^2 quadrature node.
  const ctype q = k * sqrt(x_quadrature_p[idx_x] * x_extent);
  // Map the angular node from [0,1] to cos(theta) in [-1,1].
  const ctype cos_theta = 2 * (ang_quadrature_p[idx_y] - (ctype)0.5);
  // Surface of the (d-1)-dimensional unit sphere (solid-angle factor).
  constexpr ctype S_dm1 = S_d_prec<ctype>(d - 1);

  NT result = 0.;

  if (idx_z < x0_summands) {
    // Discrete part: Matsubara frequency q0 = 2 pi T n with n = idx_z.
    const ctype q0 = 2 * (ctype)M_PI * m_T * idx_z;
    const ctype int_element = m_T * S_dm1                                // solid nd angle
                              * (powr<d - 3>(q) / (ctype)2 * powr<2>(k)) // x = p^2 / k^2 integral
                              * (1 / (ctype)2)                           // divide the cos integral out
                              / powr<d - 1>(2 * (ctype)M_PI);            // fourier factor
    const ctype weight = 2 * ang_quadrature_w[idx_y] * x_quadrature_w[idx_x] * x_extent;
    // n = 0 contributes once; every n > 0 frequency enters with both signs.
    result = int_element * weight *
             (idx_z == 0 ? KERNEL::kernel(q, cos_theta, (ctype)0, k, t...)
                         : KERNEL::kernel(q, cos_theta, q0, k, t...) + KERNEL::kernel(q, cos_theta, -q0, k, t...));
  } else {
    // Continuous part: logarithmic q0 quadrature starting just above the
    // explicitly summed frequencies. At T = 0 the lower bound is shifted by
    // 1e-3 to keep the logarithm finite; the shift is subtracted again after
    // exponentiation.
    const ctype shift = (m_T == 0) ? ctype(1e-3) : ctype(0);
    const ctype integral_start = (2 * x0_summands * (ctype)M_PI * m_T) / k;
    const ctype log_start = log(integral_start + shift);
    const ctype log_ext = log(x0_extent / (integral_start + shift));

    // q0 node on the logarithmic grid (idx_z offset by the summed part).
    const ctype q0 = k * (exp(log_start + log_ext * x0_quadrature_p[idx_z - x0_summands]) - shift);

    const ctype int_element = S_dm1                                      // solid nd angle
                              * (powr<d - 3>(q) / (ctype)2 * powr<2>(k)) // x = p^2 / k^2 integral
                              * (k)                                      // x0 = q0 / k integral
                              * (1 / (ctype)2)                           // divide the cos integral out
                              / powr<d>(2 * (ctype)M_PI);                // fourier factor
    // Quadrature weights including the Jacobian of the log substitution.
    const ctype weight = 2 * ang_quadrature_w[idx_y] * x_quadrature_w[idx_x] * x_extent *
                         (x0_quadrature_w[idx_z - x0_summands] * log_ext * q0 / k);

    // Symmetric contribution from +q0 and -q0.
    result = int_element * weight *
             (KERNEL::kernel(q, cos_theta, q0, k, t...) + KERNEL::kernel(q, cos_theta, -q0, k, t...));
  }

  dest[idx] = result;
}
70
// GPU integrator for d-dimensional momentum integrals at finite temperature
// with one resolved angle: the x0 (frequency) direction combines an explicit
// sum over the first x0_summands Matsubara frequencies with a logarithmic
// quadrature for the remainder (see gridreduce_angle_finiteTx0 above).
// NOTE(review): this listing is a doxygen extraction — the leading numbers
// are doxygen line numbers and hyperlinked declaration lines are missing;
// confirm details against the original header.
71 template <int d, typename NT, typename KERNEL> class IntegratorAngleFiniteTx0GPU
72 {
73 public:
// Scalar coordinate type associated with NT (via get_type::ctype).
74 using ctype = typename get_type::ctype<NT>;
75
// Constructor fragment taking a JSONValue (its opening signature line and
// the delegation line, doxygen lines 76 and 79, are missing from this
// extraction): reads the temperature from "/physical/T" and the CUDA block
// size limit from "/integration/cudathreadsperblock", and evidently
// delegates to the main constructor below — confirm against the original.
77 const ctype x_extent, const ctype x0_extent, const uint x0_summands,
78 const JSONValue &json)
80 json.get_double("/physical/T"), json.get_uint("/integration/cudathreadsperblock"))
81 {
82 }
83
// Main constructor fragment (doxygen lines 84, 87, 89 and 93-96 are missing
// from this extraction): stores the grid sizes — extending the x0 direction
// by the _x0_summands explicitly summed Matsubara frequencies — sets the
// temperature, and derives the CUDA launch configuration.
85 const ctype x_extent, const ctype x0_extent, const uint _x0_summands, const ctype T,
86 const uint max_block_size = 256)
88 grid_sizes({_grid_sizes[0], _grid_sizes[1], _grid_sizes[2] + _x0_summands}),
90 original_x0_summands(_x0_summands),
// Initial pool size: device_data_size rounded up to the next multiple of
// 256 — presumably bytes; confirm the units of device_data_size.
91 pool(rmm::mr::get_current_device_resource(), (device_data_size / 256 + 1) * 256)
92 {
97
98 set_T(T);
99
100 block_sizes = {max_block_size, max_block_size, max_block_size};
101 // choose block sizes such that the size is both as close to max_block_size as possible and the individual sizes
102 // are as close to each other as possible
// Round-robin shrink (z, y, x, z, ...) until the block fits within
// max_block_size threads and every block size evenly divides its grid
// size — the kernel has no bounds guard, so exact tiling is required.
103 uint optimize_dim = 2;
104 while (block_sizes[0] * block_sizes[1] * block_sizes[2] > max_block_size || block_sizes[0] > grid_sizes[0] ||
105 block_sizes[1] > grid_sizes[1] || block_sizes[2] > grid_sizes[2]) {
106 if (block_sizes[optimize_dim] > 1) block_sizes[optimize_dim]--;
107 while (grid_sizes[optimize_dim] % block_sizes[optimize_dim] != 0)
108 block_sizes[optimize_dim]--;
109 optimize_dim = (optimize_dim + 2) % 3;
110 }
111
// Blocks per dimension follow from the exact-divisibility loop above.
112 uint blocks1 = grid_sizes[0] / block_sizes[0];
113 uint threads1 = block_sizes[0];
114 uint blocks2 = grid_sizes[1] / block_sizes[1];
115 uint threads2 = block_sizes[1];
116 uint blocks3 = grid_sizes[2] / block_sizes[2];
117 uint threads3 = block_sizes[2];
118
119 num_blocks = dim3(blocks1, blocks2, blocks3);
120 threads_per_block = dim3(threads1, threads2, threads3);
121 }
122
133
// Update the extent of the x0 (frequency) integration domain.
134 void set_x0_extent(const ctype val) { x0_extent = val; }
135
149
// Synchronously evaluate the integral for flow scale k and kernel arguments
// t...: returns KERNEL::constant(k, t...) plus the sum over all weighted
// integrand samples produced on the device.
150 template <typename... T> NT get(const ctype k, const T &...t) const
151 {
// One pooled stream and one pool-backed device buffer per call.
152 const auto cuda_stream = cuda_stream_pool.get_stream();
153 rmm::device_uvector<NT> device_data(device_data_size, cuda_stream, &pool);
// NOTE(review): the kernel launch (doxygen lines 154-156) is missing from
// this extraction; gridreduce_angle_finiteTx0 is presumably launched here
// with num_blocks/threads_per_block on cuda_stream — confirm.
157 check_cuda();
// Reduce on the same stream so the reduction is ordered after the launch.
158 return KERNEL::constant(k, t...) + thrust::reduce(thrust::cuda::par.on(cuda_stream.value()), device_data.begin(),
159 device_data.end(), NT(0.), thrust::plus<NT>());
160 }
161
// Asynchronous variant of get(): returns a deferred future whose get()
// performs the reduction. The device buffer is held by a shared_ptr
// captured by value so it outlives this call.
162 template <typename... T> std::future<NT> request(const ctype k, const T &...t) const
163 {
164 const auto cuda_stream = cuda_stream_pool.get_stream();
165 std::shared_ptr<rmm::device_uvector<NT>> device_data =
166 std::make_shared<rmm::device_uvector<NT>>(device_data_size, cuda_stream, &pool);
// NOTE(review): the kernel launch (doxygen lines 167-169) is missing from
// this extraction — presumably analogous to the one in get(); confirm.
170 check_cuda();
171 const NT constant = KERNEL::constant(k, t...);
172
// std::launch::deferred: the thrust reduction runs lazily on the thread
// that calls future::get(), not on a background thread.
173 return std::async(std::launch::deferred, [=, this]() {
174 return constant + thrust::reduce(thrust::cuda::par.on(cuda_stream.value()), (*device_data).begin(),
175 (*device_data).end(), NT(0.), thrust::plus<NT>());
176 });
177 }
178
179 private:
// NOTE(review): several private member declarations are missing from this
// extraction (doxygen dropped hyperlinked lines); per the reference section
// of this page they include quadrature_provider, device_data_size, the
// device-side quadrature pointers, x_extent, x0_extent, x0_summands, m_T,
// num_blocks and threads_per_block — confirm against the original header.
181
// Full grid sizes (x, angle, x0) and the block sizes chosen to tile them.
182 const std::array<uint, 3> grid_sizes;
183 std::array<uint, 3> block_sizes;
184
186
193
199
202
// Pool memory resource backing the per-call device buffers; mutable so the
// const get()/request() methods can allocate from it.
203 using PoolMR = rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>;
204 mutable PoolMR pool;
// Stream pool handing out a stream per get()/request() call.
205 const rmm::cuda_stream_pool cuda_stream_pool;
206 };
207} // namespace DiFfRG
208
209#else
210
211#ifdef USE_CUDA
212
// Forward declaration only: taken when USE_CUDA is set but this translation
// unit is not compiled by a CUDA compiler (__CUDACC__ unset). The definition
// lives in the __CUDACC__ branch of this header.
213namespace DiFfRG
214{
215 template <int d, typename NT, typename KERNEL> class IntegratorAngleFiniteTx0GPU;
216}
217
218#else
219
221
222namespace DiFfRG
223{
224 template <int d, typename NT, typename KERNEL>
225 class IntegratorAngleFiniteTx0GPU : public IntegratorAngleFiniteTx0TBB<d, NT, KERNEL>
226 {
227 public:
228 using ctype = typename get_type::ctype<NT>;
229
230 IntegratorAngleFiniteTx0GPU(QuadratureProvider &quadrature_provider, const std::array<uint, 3> _grid_sizes,
231 const ctype x_extent, const ctype x0_extent, const uint x0_summands, const ctype T,
232 const uint max_block_size = 256)
233 : IntegratorAngleFiniteTx0TBB<d, NT, KERNEL>(quadrature_provider, _grid_sizes, x_extent, x0_extent, x0_summands,
234 T, max_block_size)
235 {
236 }
237 };
238} // namespace DiFfRG
239
240#endif
241
242#endif
Definition integrator_angle_finiteTx0_gpu.hh:72
const uint device_data_size
Definition integrator_angle_finiteTx0_gpu.hh:185
const ctype * ptr_x_quadrature_w
Definition integrator_angle_finiteTx0_gpu.hh:188
const std::array< uint, 3 > grid_sizes
Definition integrator_angle_finiteTx0_gpu.hh:182
const ctype * ptr_x_quadrature_p
Definition integrator_angle_finiteTx0_gpu.hh:187
dim3 threads_per_block
Definition integrator_angle_finiteTx0_gpu.hh:201
uint x0_summands
Definition integrator_angle_finiteTx0_gpu.hh:197
QuadratureProvider & quadrature_provider
Definition integrator_angle_finiteTx0_gpu.hh:180
const rmm::cuda_stream_pool cuda_stream_pool
Definition integrator_angle_finiteTx0_gpu.hh:205
const ctype * ptr_x0_quadrature_w
Definition integrator_angle_finiteTx0_gpu.hh:192
const ctype * ptr_ang_quadrature_w
Definition integrator_angle_finiteTx0_gpu.hh:190
const ctype * ptr_x0_quadrature_p
Definition integrator_angle_finiteTx0_gpu.hh:191
IntegratorAngleFiniteTx0GPU(QuadratureProvider &quadrature_provider, const std::array< uint, 3 > _grid_sizes, const ctype x_extent, const ctype x0_extent, const uint _x0_summands, const ctype T, const uint max_block_size=256)
Definition integrator_angle_finiteTx0_gpu.hh:84
PoolMR pool
Definition integrator_angle_finiteTx0_gpu.hh:204
void set_x0_extent(const ctype val)
Definition integrator_angle_finiteTx0_gpu.hh:134
IntegratorAngleFiniteTx0GPU(const IntegratorAngleFiniteTx0GPU &other)
Definition integrator_angle_finiteTx0_gpu.hh:136
typename get_type::ctype< NT > ctype
Definition integrator_angle_finiteTx0_gpu.hh:74
IntegratorAngleFiniteTx0GPU(QuadratureProvider &quadrature_provider, const std::array< uint, 3 > grid_sizes, const ctype x_extent, const ctype x0_extent, const uint x0_summands, const JSONValue &json)
Definition integrator_angle_finiteTx0_gpu.hh:76
dim3 num_blocks
Definition integrator_angle_finiteTx0_gpu.hh:200
void set_T(const ctype T)
Definition integrator_angle_finiteTx0_gpu.hh:123
std::array< uint, 3 > block_sizes
Definition integrator_angle_finiteTx0_gpu.hh:183
const uint original_x0_summands
Definition integrator_angle_finiteTx0_gpu.hh:196
std::future< NT > request(const ctype k, const T &...t) const
Definition integrator_angle_finiteTx0_gpu.hh:162
const ctype * ptr_ang_quadrature_p
Definition integrator_angle_finiteTx0_gpu.hh:189
ctype m_T
Definition integrator_angle_finiteTx0_gpu.hh:198
ctype x0_extent
Definition integrator_angle_finiteTx0_gpu.hh:195
const ctype x_extent
Definition integrator_angle_finiteTx0_gpu.hh:194
rmm::mr::pool_memory_resource< rmm::mr::device_memory_resource > PoolMR
Definition integrator_angle_finiteTx0_gpu.hh:203
NT get(const ctype k, const T &...t) const
Definition integrator_angle_finiteTx0_gpu.hh:150
A wrapper around the boost json value class.
Definition json.hh:19
A class that provides quadrature points and weights, in host and device memory. The quadrature points...
Definition quadrature_provider.hh:139
const NT * get_device_weights(const size_t order, const int device=0, const QuadratureType type=QuadratureType::legendre)
Get the device-side quadrature weights for a quadrature of the given order.
Definition quadrature_provider.hh:211
const NT * get_device_points(const size_t order, const int device=0, const QuadratureType type=QuadratureType::legendre)
Get the device-side quadrature points for a quadrature of the given order.
Definition quadrature_provider.hh:198
typename internal::_ctype< CT >::value ctype
Definition types.hh:106
Definition complex_math.hh:14
constexpr __forceinline__ __host__ __device__ NumberType powr(const NumberType x)
A compile-time evaluatable power function for whole number exponents.
Definition math.hh:45
__global__ void gridreduce_angle_finiteTx0(NT *dest, const ctype *x_quadrature_p, const ctype *x_quadrature_w, const ctype *ang_quadrature_p, const ctype *ang_quadrature_w, const ctype *x0_quadrature_p, const ctype *x0_quadrature_w, const ctype x_extent, const ctype x0_extent, const uint x0_summands, const ctype m_T, const ctype k, T... t)
Definition integrator_angle_finiteTx0_gpu.hh:21
bool __forceinline__ __host__ __device__ is_close(T1 a, T2 b, T3 eps_)
Function to evaluate whether two floats are equal to numerical precision. Tests for both relative and...
Definition math.hh:160
consteval NT S_d_prec(uint d)
Surface of a d-dimensional sphere (precompiled)
Definition math.hh:104
void check_cuda(std::string prefix="")
Check if a CUDA error occurred and print an error message if it did.
unsigned int uint
Definition utils.hh:22