Skip to content

Commit 3792d36

Browse files
committed
Exclude more kernels from coverage
1 parent 797d28f commit 3792d36

3 files changed

Lines changed: 10 additions & 0 deletions

File tree

src/indexing.jl

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ function Base.findall(bools::AnyCuArray{Bool})
3131
ys = CuArray{I}(undef, n)
3232

3333
if n > 0
34+
## COV_EXCL_START
3435
function kernel(ys::CuDeviceArray, bools, indices)
3536
i = threadIdx().x + (blockIdx().x - 1i32) * blockDim().x
3637

@@ -42,6 +43,7 @@ function Base.findall(bools::AnyCuArray{Bool})
4243

4344
return
4445
end
46+
## COV_EXCL_STOP
4547

4648
kernel = @cuda name="findall" launch=false kernel(ys, bools, indices)
4749
config = launch_configuration(kernel.fun)

src/random.jl

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@ Random.seed!(rng::RNG) = Random.seed!(rng, make_seed())
4444
function Random.rand!(rng::RNG, A::AnyCuArray)
4545
isempty(A) && return A
4646

47+
## COV_EXCL_START
4748
function kernel(A::AbstractArray{T}, seed::UInt32, counter::UInt32) where {T}
4849
device_rng = Random.default_rng()
4950

@@ -65,6 +66,7 @@ function Random.rand!(rng::RNG, A::AnyCuArray)
6566

6667
return
6768
end
69+
## COV_EXCL_STOP
6870

6971
# XXX: because of how random numbers are generated, the launch configuration
7072
# affects the results. as such, use a constant number of threads, set
@@ -88,6 +90,7 @@ end
8890
function Random.randn!(rng::RNG, A::AnyCuArray{<:Union{AbstractFloat,Complex{<:AbstractFloat}}})
8991
isempty(A) && return A
9092

93+
## COV_EXCL_START
9194
function kernel(A::AbstractArray{T}, seed::UInt32, counter::UInt32) where {T<:Real}
9295
device_rng = Random.default_rng()
9396

@@ -149,6 +152,7 @@ function Random.randn!(rng::RNG, A::AnyCuArray{<:Union{AbstractFloat,Complex{<:A
149152
end
150153
return
151154
end
155+
## COV_EXCL_STOP
152156

153157
# see note in `rand!` about the launch configuration
154158
threads = 32

src/reverse.jl

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ function _reverse(input::AnyCuArray{T, N}, output::AnyCuArray{T, N};
1515
# converts a linear index in a reduced array to an ND-index, but using the reduced size
1616
nd_idx = CartesianIndices(input)
1717

18+
## COV_EXCL_START
1819
function kernel(input::AbstractArray{T, N}, output::AbstractArray{T, N}) where {T, N}
1920
offset_in = blockDim().x * (blockIdx().x - 1i32)
2021
index_in = offset_in + threadIdx().x
@@ -28,6 +29,7 @@ function _reverse(input::AnyCuArray{T, N}, output::AnyCuArray{T, N};
2829

2930
return
3031
end
32+
## COV_EXCL_STOP
3133

3234
nthreads = 256
3335
nblocks = cld(length(input), nthreads)
@@ -51,6 +53,7 @@ function _reverse!(data::AnyCuArray{T, N}; dims=1:ndims(data)) where {T, N}
5153
# converts a linear index in a reduced array to an ND-index, but using the reduced size
5254
nd_idx = CartesianIndices(reduced_size)
5355

56+
## COV_EXCL_START
5457
function kernel(data::AbstractArray{T, N}) where {T, N}
5558
offset_in = blockDim().x * (blockIdx().x - 1i32)
5659

@@ -71,6 +74,7 @@ function _reverse!(data::AnyCuArray{T, N}; dims=1:ndims(data)) where {T, N}
7174

7275
return
7376
end
77+
## COV_EXCL_STOP
7478

7579
# NOTE: we launch slightly more than half the number of elements in the array as threads.
7680
# The last non-singleton dimension along which to reverse is used to define how the array is split.

0 commit comments

Comments (0)