Neko-TOP
A portable framework for high-order spectral element flow topology optimization.
math_ext_kernel.h
/*
 Copyright (c) 2021-2023, The Neko Authors
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above
     copyright notice, this list of conditions and the following
     disclaimer in the documentation and/or other materials provided
     with the distribution.

   * Neither the name of the authors nor the names of its
     contributors may be used to endorse or promote products derived
     from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef __NEKO_CUDA_MATH_EXT_KERNELS__
#define __NEKO_CUDA_MATH_EXT_KERNELS__

/** Copy b into a at the indices given in mask (one-based, Fortran-style). */
template <typename T>
__global__ void copy_mask_kernel(
    T* __restrict__ a, T* __restrict__ b, const int size,
    int* __restrict__ mask, const int mask_size) {

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int str = blockDim.x * gridDim.x;

  for (int i = idx; i < mask_size; i += str) {
    a[mask[i]-1] = b[mask[i]-1];
  }
}

/** Add the scalar c to a at the indices given in mask. */
template <typename T>
__global__ void cadd_mask_kernel(
    T* __restrict__ a, const T c, const int size, int* __restrict__ mask,
    const int mask_size) {

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int str = blockDim.x * gridDim.x;

  for (int i = idx; i < mask_size; i += str) {
    a[mask[i]-1] = a[mask[i]-1] + c;
  }
}

/** Invert a in place (a = 1/a) at the indices given in mask. */
template <typename T>
__global__ void invcol1_mask_kernel(
    T* __restrict__ a, const int size, int* __restrict__ mask,
    const int mask_size) {

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int str = blockDim.x * gridDim.x;

  for (int i = idx; i < mask_size; i += str) {
    a[mask[i]-1] = 1.0 / a[mask[i]-1];
  }
}

/** Multiply a by b in place (a = a*b) at the indices given in mask. */
template <typename T>
__global__ void col2_mask_kernel(
    T* __restrict__ a, T* __restrict__ b, const int size,
    int* __restrict__ mask, const int mask_size) {

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int str = blockDim.x * gridDim.x;

  for (int i = idx; i < mask_size; i += str) {
    a[mask[i]-1] = a[mask[i]-1] * b[mask[i]-1];
  }
}

/** Set a to the entrywise product b*c at the indices given in mask. */
template <typename T>
__global__ void col3_mask_kernel(
    T* __restrict__ a, T* __restrict__ b, T* __restrict__ c, const int size,
    int* __restrict__ mask, const int mask_size) {

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int str = blockDim.x * gridDim.x;

  for (int i = idx; i < mask_size; i += str) {
    a[mask[i]-1] = b[mask[i]-1] * c[mask[i]-1];
  }
}

/** Set a to the entrywise difference b - c at the indices given in mask. */
template <typename T>
__global__ void sub3_mask_kernel(
    T* __restrict__ a, T* __restrict__ b, T* __restrict__ c, const int size,
    int* __restrict__ mask, const int mask_size) {

  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int str = blockDim.x * gridDim.x;

  for (int i = idx; i < mask_size; i += str) {
    a[mask[i]-1] = b[mask[i]-1] - c[mask[i]-1];
  }
}

#endif // __NEKO_CUDA_MATH_EXT_KERNELS__
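
For reference, the sketch below shows how one of these masked kernels could be driven from standalone host code. It is a minimal illustration under stated assumptions, not how Neko-TOP itself launches the kernels; the launch configuration and every host-side name (h_a, d_a, d_mask, and so on) are invented for the example. Two details the example relies on are visible in the header itself: the mask holds one-based indices (each kernel subtracts 1 before indexing), and the size argument, the length of the full arrays, is not used inside the kernels.

// Minimal, self-contained driver (illustrative only; all host-side code
// here is an assumption, not part of Neko-TOP). It adds 1.0 to a device
// array at three masked positions using cadd_mask_kernel.
#include <cstdio>
#include <cuda_runtime.h>
#include "math_ext_kernel.h"

int main() {
  const int size = 8;
  const int mask_size = 3;
  const double h_a[size] = {0.0};          // zero-initialized host array
  const int h_mask[mask_size] = {1, 4, 8}; // one-based (Fortran-style) indices

  double* d_a;
  int* d_mask;
  cudaMalloc(&d_a, size * sizeof(double));
  cudaMalloc(&d_mask, mask_size * sizeof(int));
  cudaMemcpy(d_a, h_a, size * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_mask, h_mask, mask_size * sizeof(int), cudaMemcpyHostToDevice);

  // The kernels use a grid-stride loop, so any launch shape is correct;
  // covering mask_size threads in a single pass is a reasonable choice.
  const int threads = 256;
  const int blocks = (mask_size + threads - 1) / threads;
  cadd_mask_kernel<double><<<blocks, threads>>>(d_a, 1.0, size, d_mask,
                                                mask_size);

  double h_out[size];
  cudaMemcpy(h_out, d_a, size * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < size; i++)
    printf("a[%d] = %g\n", i, h_out[i]);   // expect 1 at i = 0, 3, 7

  cudaFree(d_a);
  cudaFree(d_mask);
  return 0;
}

Because every kernel strides by blockDim.x * gridDim.x, correctness does not depend on the grid covering mask_size exactly; a smaller grid simply makes each thread handle several masked entries.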