Line | Code | Exclusive | Inclusive |
---|---|---|---|
1 | # This file is a part of Julia. License is MIT: https://julialang.org/license | ||
2 | |||
3 | ### Multidimensional iterators | ||
4 | module IteratorsMD | ||
5 | import .Base: eltype, length, size, first, last, in, getindex, setindex!, | ||
6 | min, max, zero, oneunit, isless, eachindex, | ||
7 | convert, show, iterate, promote_rule | ||
8 | |||
9 | import .Base: +, -, *, (:) | ||
10 | import .Base: simd_outer_range, simd_inner_length, simd_index, setindex | ||
11 | import .Base: to_indices, to_index, _to_indices1, _cutdim | ||
12 | using .Base: IndexLinear, IndexCartesian, AbstractCartesianIndex, fill_to_length, tail, | ||
13 | ReshapedArray, ReshapedArrayLF, OneTo, Fix1 | ||
14 | using .Base.Iterators: Reverse, PartitionIterator | ||
15 | using .Base: @propagate_inbounds | ||
16 | |||
17 | export CartesianIndex, CartesianIndices | ||
18 | |||
19 | """ | ||
20 | CartesianIndex(i, j, k...) -> I | ||
21 | CartesianIndex((i, j, k...)) -> I | ||
22 | |||
23 | Create a multidimensional index `I`, which can be used for | ||
24 | indexing a multidimensional array `A`. In particular, `A[I]` is | ||
25 | equivalent to `A[i,j,k...]`. One can freely mix integer and | ||
26 | `CartesianIndex` indices; for example, `A[Ipre, i, Ipost]` (where | ||
27 | `Ipre` and `Ipost` are `CartesianIndex` indices and `i` is an | ||
28 | `Int`) can be a useful expression when writing algorithms that | ||
29 | work along a single dimension of an array of arbitrary | ||
30 | dimensionality. | ||
31 | |||
32 | A `CartesianIndex` is sometimes produced by [`eachindex`](@ref), and | ||
33 | always when iterating with an explicit [`CartesianIndices`](@ref). | ||
34 | |||
35 | An `I::CartesianIndex` is treated as a "scalar" (not a container) | ||
36 | for `broadcast`. In order to iterate over the components of a | ||
37 | `CartesianIndex`, convert it to a tuple with `Tuple(I)`. | ||
38 | |||
39 | # Examples | ||
40 | ```jldoctest | ||
41 | julia> A = reshape(Vector(1:16), (2, 2, 2, 2)) | ||
42 | 2×2×2×2 Array{Int64, 4}: | ||
43 | [:, :, 1, 1] = | ||
44 | 1 3 | ||
45 | 2 4 | ||
46 | |||
47 | [:, :, 2, 1] = | ||
48 | 5 7 | ||
49 | 6 8 | ||
50 | |||
51 | [:, :, 1, 2] = | ||
52 | 9 11 | ||
53 | 10 12 | ||
54 | |||
55 | [:, :, 2, 2] = | ||
56 | 13 15 | ||
57 | 14 16 | ||
58 | |||
59 | julia> A[CartesianIndex((1, 1, 1, 1))] | ||
60 | 1 | ||
61 | |||
62 | julia> A[CartesianIndex((1, 1, 1, 2))] | ||
63 | 9 | ||
64 | |||
65 | julia> A[CartesianIndex((1, 1, 2, 1))] | ||
66 | 5 | ||
67 | ``` | ||
68 | |||
69 | !!! compat "Julia 1.10" | ||
70 | Using a `CartesianIndex` as a "scalar" for `broadcast` requires | ||
71 | Julia 1.10; in previous releases, use `Ref(I)`. | ||
72 | """ | ||
73 | struct CartesianIndex{N} <: AbstractCartesianIndex{N} | ||
74 | I::NTuple{N,Int} | ||
75 | CartesianIndex{N}(index::NTuple{N,Integer}) where {N} = new(index) | ||
76 | end | ||
77 | |||
78 | CartesianIndex(index::NTuple{N,Integer}) where {N} = CartesianIndex{N}(index) | ||
79 | CartesianIndex(index::Integer...) = CartesianIndex(index) | ||
80 | CartesianIndex{N}(index::Vararg{Integer,N}) where {N} = CartesianIndex{N}(index) | ||
81 | # Allow passing tuples smaller than N | ||
82 | CartesianIndex{N}(index::Tuple) where {N} = CartesianIndex{N}(fill_to_length(index, 1, Val(N))) | ||
83 | CartesianIndex{N}(index::Integer...) where {N} = CartesianIndex{N}(index) | ||
84 | CartesianIndex{N}() where {N} = CartesianIndex{N}(()) | ||
85 | # Un-nest passed CartesianIndexes | ||
86 | CartesianIndex(index::Union{Integer, CartesianIndex}...) = CartesianIndex(flatten(index)) | ||
87 | flatten(::Tuple{}) = () | ||
88 | flatten(I::Tuple{Any}) = Tuple(I[1]) | ||
89 | @inline flatten(I::Tuple) = (Tuple(I[1])..., flatten(tail(I))...) | ||
90 | CartesianIndex(index::Tuple{Vararg{Union{Integer, CartesianIndex}}}) = CartesianIndex(index...) | ||
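
The un-nesting constructors above flatten any mix of `Integer` and `CartesianIndex` arguments into a single index. A minimal REPL sketch (expected results shown as comments; exact printing may vary):

```julia
# Nested CartesianIndex arguments are flattened into one index.
i = CartesianIndex(1, CartesianIndex(2, 3))   # CartesianIndex(1, 2, 3)
j = CartesianIndex((CartesianIndex(4), 5))    # CartesianIndex(4, 5)

Tuple(i), Tuple(j)                            # ((1, 2, 3), (4, 5))
```
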
91 | show(io::IO, i::CartesianIndex) = (print(io, "CartesianIndex"); show(io, i.I)) | ||
92 | |||
93 | # length | ||
94 | length(::CartesianIndex{N}) where {N} = N | ||
95 | length(::Type{CartesianIndex{N}}) where {N} = N | ||
96 | |||
97 | # indexing | ||
98 | getindex(index::CartesianIndex, i::Integer) = index.I[i] | ||
99 | Base.get(A::AbstractArray, I::CartesianIndex, default) = get(A, I.I, default) | ||
100 | eltype(::Type{T}) where {T<:CartesianIndex} = eltype(fieldtype(T, :I)) | ||
101 | |||
102 | # access to index tuple | ||
103 | Tuple(index::CartesianIndex) = index.I | ||
104 | |||
105 | Base.setindex(x::CartesianIndex,i,j) = CartesianIndex(Base.setindex(Tuple(x),i,j)) | ||
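
A small usage sketch for the `Base.setindex` method above: it returns a new `CartesianIndex` with one component replaced (result shown as a comment).

```julia
I = CartesianIndex(1, 2, 3)
Base.setindex(I, 5, 2)   # CartesianIndex(1, 5, 3): component 2 replaced by 5
```
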
106 | |||
107 | # equality | ||
108 | Base.:(==)(a::CartesianIndex{N}, b::CartesianIndex{N}) where N = a.I == b.I | ||
109 | |||
110 | # zeros and ones | ||
111 | zero(::CartesianIndex{N}) where {N} = zero(CartesianIndex{N}) | ||
112 | zero(::Type{CartesianIndex{N}}) where {N} = CartesianIndex(ntuple(Returns(0), Val(N))) | ||
113 | oneunit(::CartesianIndex{N}) where {N} = oneunit(CartesianIndex{N}) | ||
114 | oneunit(::Type{CartesianIndex{N}}) where {N} = CartesianIndex(ntuple(Returns(1), Val(N))) | ||
115 | |||
116 | # arithmetic, min/max | ||
117 | @inline (-)(index::CartesianIndex{N}) where {N} = | ||
118 | CartesianIndex{N}(map(-, index.I)) | ||
119 | @inline (+)(index1::CartesianIndex{N}, index2::CartesianIndex{N}) where {N} = | ||
120 | CartesianIndex{N}(map(+, index1.I, index2.I)) | ||
121 | @inline (-)(index1::CartesianIndex{N}, index2::CartesianIndex{N}) where {N} = | ||
122 | CartesianIndex{N}(map(-, index1.I, index2.I)) | ||
123 | @inline min(index1::CartesianIndex{N}, index2::CartesianIndex{N}) where {N} = | ||
124 | CartesianIndex{N}(map(min, index1.I, index2.I)) | ||
125 | @inline max(index1::CartesianIndex{N}, index2::CartesianIndex{N}) where {N} = | ||
126 | CartesianIndex{N}(map(max, index1.I, index2.I)) | ||
127 | |||
128 | @inline (*)(a::Integer, index::CartesianIndex{N}) where {N} = CartesianIndex{N}(map(x->a*x, index.I)) | ||
129 | @inline (*)(index::CartesianIndex, a::Integer) = *(a,index) | ||
130 | |||
131 | # comparison | ||
132 | isless(I1::CartesianIndex{N}, I2::CartesianIndex{N}) where {N} = isless(reverse(I1.I), reverse(I2.I)) | ||
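
The reversed-tuple comparison above orders indices in the same order that column-major iteration visits them, with the last dimension most significant. A brief sketch:

```julia
# The last component is compared first, so (2, 1) sorts before (1, 2).
isless(CartesianIndex(2, 1), CartesianIndex(1, 2))   # true
sort([CartesianIndex(1, 2), CartesianIndex(2, 1)])   # [CartesianIndex(2, 1), CartesianIndex(1, 2)]
```
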
133 | |||
134 | # conversions | ||
135 | convert(::Type{T}, index::CartesianIndex{1}) where {T<:Number} = convert(T, index[1]) | ||
136 | convert(::Type{T}, index::CartesianIndex) where {T<:Tuple} = convert(T, index.I) | ||
137 | |||
138 | # hashing | ||
139 | const cartindexhash_seed = UInt == UInt64 ? 0xd60ca92f8284b8b0 : 0xf2ea7c2e | ||
140 | function Base.hash(ci::CartesianIndex, h::UInt) | ||
141 | h += cartindexhash_seed | ||
142 | for i in ci.I | ||
143 | h = hash(i, h) | ||
144 | end | ||
145 | return h | ||
146 | end | ||
147 | |||
148 | # nextind and prevind with CartesianIndex | ||
149 | function Base.nextind(a::AbstractArray{<:Any,N}, i::CartesianIndex{N}) where {N} | ||
150 | iter = CartesianIndices(axes(a)) | ||
151 | # might overflow | ||
152 | I = inc(i.I, iter.indices) | ||
153 | return I | ||
154 | end | ||
155 | function Base.prevind(a::AbstractArray{<:Any,N}, i::CartesianIndex{N}) where {N} | ||
156 | iter = CartesianIndices(axes(a)) | ||
157 | # might underflow | ||
158 | I = dec(i.I, iter.indices) | ||
159 | return I | ||
160 | end | ||
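
A sketch of the `nextind`/`prevind` methods above: the index moves in column-major order and carries into the next column at the end of each column; as the comments note, stepping past the last index is not bounds-checked.

```julia
A = zeros(2, 3)
nextind(A, CartesianIndex(2, 1))   # CartesianIndex(1, 2): carries into the next column
prevind(A, CartesianIndex(1, 2))   # CartesianIndex(2, 1)
nextind(A, CartesianIndex(2, 3))   # CartesianIndex(1, 4): may run past the array (no bounds check)
```
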
161 | |||
162 | Base._ind2sub(t::Tuple, ind::CartesianIndex) = Tuple(ind) | ||
163 | |||
164 | # Iteration over the elements of CartesianIndex cannot be supported until its length can be inferred, | ||
165 | # see #23719 | ||
166 | Base.iterate(::CartesianIndex) = | ||
167 | error("iteration is deliberately unsupported for CartesianIndex. Use `I` rather than `I...`, or use `Tuple(I)...`") | ||
168 | |||
169 | # Iteration | ||
170 | const OrdinalRangeInt = OrdinalRange{Int, Int} | ||
171 | """ | ||
172 | CartesianIndices(sz::Dims) -> R | ||
173 | CartesianIndices((istart:[istep:]istop, jstart:[jstep:]jstop, ...)) -> R | ||
174 | |||
175 | Define a region `R` spanning a multidimensional rectangular range | ||
176 | of integer indices. These are most commonly encountered in the | ||
177 | context of iteration, where `for I in R ... end` will return | ||
178 | [`CartesianIndex`](@ref) indices `I` equivalent to the nested loops | ||
179 | |||
180 | for j = jstart:jstep:jstop | ||
181 | for i = istart:istep:istop | ||
182 | ... | ||
183 | end | ||
184 | end | ||
185 | |||
186 | Consequently these can be useful for writing algorithms that | ||
187 | work in arbitrary dimensions. | ||
188 | |||
189 | CartesianIndices(A::AbstractArray) -> R | ||
190 | |||
191 | As a convenience, constructing a `CartesianIndices` from an array makes a | ||
192 | range of its indices. | ||
193 | |||
194 | !!! compat "Julia 1.6" | ||
195 | The step range method `CartesianIndices((istart:istep:istop, jstart:[jstep:]jstop, ...))` | ||
196 | requires at least Julia 1.6. | ||
197 | |||
198 | # Examples | ||
199 | ```jldoctest | ||
200 | julia> foreach(println, CartesianIndices((2, 2, 2))) | ||
201 | CartesianIndex(1, 1, 1) | ||
202 | CartesianIndex(2, 1, 1) | ||
203 | CartesianIndex(1, 2, 1) | ||
204 | CartesianIndex(2, 2, 1) | ||
205 | CartesianIndex(1, 1, 2) | ||
206 | CartesianIndex(2, 1, 2) | ||
207 | CartesianIndex(1, 2, 2) | ||
208 | CartesianIndex(2, 2, 2) | ||
209 | |||
210 | julia> CartesianIndices(fill(1, (2,3))) | ||
211 | CartesianIndices((2, 3)) | ||
212 | ``` | ||
213 | |||
214 | ## Conversion between linear and cartesian indices | ||
215 | |||
216 | Linear index to cartesian index conversion exploits the fact that a | ||
217 | `CartesianIndices` is an `AbstractArray` and can be indexed linearly: | ||
218 | |||
219 | ```jldoctest | ||
220 | julia> cartesian = CartesianIndices((1:3, 1:2)) | ||
221 | CartesianIndices((1:3, 1:2)) | ||
222 | |||
223 | julia> cartesian[4] | ||
224 | CartesianIndex(1, 2) | ||
225 | |||
226 | julia> cartesian = CartesianIndices((1:2:5, 1:2)) | ||
227 | CartesianIndices((1:2:5, 1:2)) | ||
228 | |||
229 | julia> cartesian[2, 2] | ||
230 | CartesianIndex(3, 2) | ||
231 | ``` | ||
232 | |||
233 | ## Broadcasting | ||
234 | |||
235 | `CartesianIndices` support broadcasting arithmetic (+ and -) with a `CartesianIndex`. | ||
236 | |||
237 | !!! compat "Julia 1.1" | ||
238 | Broadcasting of CartesianIndices requires at least Julia 1.1. | ||
239 | |||
240 | ```jldoctest | ||
241 | julia> CIs = CartesianIndices((2:3, 5:6)) | ||
242 | CartesianIndices((2:3, 5:6)) | ||
243 | |||
244 | julia> CI = CartesianIndex(3, 4) | ||
245 | CartesianIndex(3, 4) | ||
246 | |||
247 | julia> CIs .+ CI | ||
248 | CartesianIndices((5:6, 9:10)) | ||
249 | ``` | ||
250 | |||
251 | For cartesian to linear index conversion, see [`LinearIndices`](@ref). | ||
252 | """ | ||
253 | struct CartesianIndices{N,R<:NTuple{N,OrdinalRangeInt}} <: AbstractArray{CartesianIndex{N},N} | ||
254 | indices::R | ||
255 | end | ||
256 | |||
257 | CartesianIndices(::Tuple{}) = CartesianIndices{0,typeof(())}(()) | ||
258 | function CartesianIndices(inds::NTuple{N,OrdinalRange{<:Integer, <:Integer}}) where {N} | ||
259 | indices = map(r->convert(OrdinalRangeInt, r), inds) | ||
260 | CartesianIndices{N, typeof(indices)}(indices) | ||
261 | end | ||
262 | |||
263 | CartesianIndices(index::CartesianIndex) = CartesianIndices(index.I) | ||
264 | CartesianIndices(inds::NTuple{N,Union{<:Integer,OrdinalRange{<:Integer}}}) where {N} = | ||
265 | CartesianIndices(map(_convert2ind, inds)) | ||
266 | |||
267 | CartesianIndices(A::AbstractArray) = CartesianIndices(axes(A)) | ||
268 | |||
269 | _convert2ind(sz::Bool) = Base.OneTo(Int8(sz)) | ||
270 | _convert2ind(sz::Integer) = Base.OneTo(sz) | ||
271 | _convert2ind(sz::AbstractUnitRange) = first(sz):last(sz) | ||
272 | _convert2ind(sz::OrdinalRange) = first(sz):step(sz):last(sz) | ||
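
A sketch of the constructor path above: integer sizes become `Base.OneTo` axes via `_convert2ind`, while ranges are kept as given (expected display shown as comments; exact printing may vary by Julia version).

```julia
CartesianIndices((2, 3:5))      # CartesianIndices((2, 3:5)); the 2 becomes Base.OneTo(2)
CartesianIndices((2, 1:2:7))    # CartesianIndices((2, 1:2:7)); step ranges preserved (Julia >= 1.6)
```
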
273 | |||
274 | function show(io::IO, iter::CartesianIndices) | ||
275 | print(io, "CartesianIndices(") | ||
276 | show(io, map(_xform_index, iter.indices)) | ||
277 | print(io, ")") | ||
278 | end | ||
279 | _xform_index(i) = i | ||
280 | _xform_index(i::OneTo) = i.stop | ||
281 | show(io::IO, ::MIME"text/plain", iter::CartesianIndices) = show(io, iter) | ||
282 | |||
283 | """ | ||
284 | (:)(start::CartesianIndex, [step::CartesianIndex], stop::CartesianIndex) | ||
285 | |||
286 | Construct [`CartesianIndices`](@ref) from two `CartesianIndex` values and an optional step. | ||
287 | |||
288 | !!! compat "Julia 1.1" | ||
289 | This method requires at least Julia 1.1. | ||
290 | |||
291 | !!! compat "Julia 1.6" | ||
292 | The step range method `start:step:stop` requires at least Julia 1.6. | ||
293 | |||
294 | # Examples | ||
295 | ```jldoctest | ||
296 | julia> I = CartesianIndex(2,1); | ||
297 | |||
298 | julia> J = CartesianIndex(3,3); | ||
299 | |||
300 | julia> I:J | ||
301 | CartesianIndices((2:3, 1:3)) | ||
302 | |||
303 | julia> I:CartesianIndex(1, 2):J | ||
304 | CartesianIndices((2:1:3, 1:2:3)) | ||
305 | ``` | ||
306 | """ | ||
307 | (:)(I::CartesianIndex{N}, J::CartesianIndex{N}) where N = | ||
308 | CartesianIndices(map((i,j) -> i:j, Tuple(I), Tuple(J))) | ||
309 | (:)(I::CartesianIndex{N}, S::CartesianIndex{N}, J::CartesianIndex{N}) where N = | ||
310 | CartesianIndices(map((i,s,j) -> i:s:j, Tuple(I), Tuple(S), Tuple(J))) | ||
311 | |||
312 | promote_rule(::Type{CartesianIndices{N,R1}}, ::Type{CartesianIndices{N,R2}}) where {N,R1,R2} = | ||
313 | CartesianIndices{N,Base.indices_promote_type(R1,R2)} | ||
314 | |||
315 | convert(::Type{Tuple{}}, R::CartesianIndices{0}) = () | ||
316 | for RT in (OrdinalRange{Int, Int}, StepRange{Int, Int}, AbstractUnitRange{Int}) | ||
317 | @eval convert(::Type{NTuple{N,$RT}}, R::CartesianIndices{N}) where {N} = | ||
318 | map(x->convert($RT, x), R.indices) | ||
319 | end | ||
320 | convert(::Type{NTuple{N,AbstractUnitRange}}, R::CartesianIndices{N}) where {N} = | ||
321 | convert(NTuple{N,AbstractUnitRange{Int}}, R) | ||
322 | convert(::Type{NTuple{N,UnitRange{Int}}}, R::CartesianIndices{N}) where {N} = | ||
323 | UnitRange{Int}.(convert(NTuple{N,AbstractUnitRange}, R)) | ||
324 | convert(::Type{NTuple{N,UnitRange}}, R::CartesianIndices{N}) where {N} = | ||
325 | UnitRange.(convert(NTuple{N,AbstractUnitRange}, R)) | ||
326 | convert(::Type{Tuple{Vararg{AbstractUnitRange{Int}}}}, R::CartesianIndices{N}) where {N} = | ||
327 | convert(NTuple{N,AbstractUnitRange{Int}}, R) | ||
328 | convert(::Type{Tuple{Vararg{AbstractUnitRange}}}, R::CartesianIndices) = | ||
329 | convert(Tuple{Vararg{AbstractUnitRange{Int}}}, R) | ||
330 | convert(::Type{Tuple{Vararg{UnitRange{Int}}}}, R::CartesianIndices{N}) where {N} = | ||
331 | convert(NTuple{N,UnitRange{Int}}, R) | ||
332 | convert(::Type{Tuple{Vararg{UnitRange}}}, R::CartesianIndices) = | ||
333 | convert(Tuple{Vararg{UnitRange{Int}}}, R) | ||
334 | |||
335 | convert(::Type{CartesianIndices{N,R}}, inds::CartesianIndices{N}) where {N,R} = | ||
336 | CartesianIndices(convert(R, inds.indices))::CartesianIndices{N,R} | ||
337 | |||
338 | # equality | ||
339 | Base.:(==)(a::CartesianIndices{N}, b::CartesianIndices{N}) where N = | ||
340 | all(map(==, a.indices, b.indices)) | ||
341 | Base.:(==)(a::CartesianIndices, b::CartesianIndices) = false | ||
342 | |||
343 | # AbstractArray implementation | ||
344 | Base.axes(iter::CartesianIndices{N,R}) where {N,R} = map(Base.axes1, iter.indices) | ||
345 | Base.has_offset_axes(iter::CartesianIndices) = Base.has_offset_axes(iter.indices...) | ||
346 | @propagate_inbounds function isassigned(iter::CartesianIndices{N,R}, I::Vararg{Int, N}) where {N,R} | ||
347 | for i in 1:N | ||
348 | isassigned(iter.indices[i], I[i]) || return false | ||
349 | end | ||
350 | return true | ||
351 | end | ||
352 | |||
353 | # getindex for a 0D CartesianIndices is necessary for disambiguation | ||
354 | @propagate_inbounds function Base.getindex(iter::CartesianIndices{0,R}) where {R} | ||
355 | CartesianIndex() | ||
356 | end | ||
357 | @inline function Base.getindex(iter::CartesianIndices{N,R}, I::Vararg{Int, N}) where {N,R} | ||
358 | # Eagerly do boundscheck before calculating each item of the CartesianIndex so that | ||
359 | # we can pass the `@inbounds` hint inside the map and generate more efficient SIMD code (#42115) | ||
360 | @boundscheck checkbounds(iter, I...) | ||
361 | index = map(iter.indices, I) do r, i | ||
362 | @inbounds getindex(r, i) | ||
363 | end | ||
364 | CartesianIndex(index) | ||
365 | end | ||
366 | |||
367 | # CartesianIndices act as a multidimensional range, so cartesian indexing of CartesianIndices | ||
368 | # with compatible dimensions may be seen as indexing into the component ranges. | ||
369 | # This may use the special indexing behavior implemented for ranges to return another CartesianIndices | ||
370 | @inline function Base.getindex(iter::CartesianIndices{N,R}, | ||
371 | I::Vararg{Union{OrdinalRange{<:Integer, <:Integer}, Colon}, N}) where {N,R} | ||
372 | @boundscheck checkbounds(iter, I...) | ||
373 | indices = map(iter.indices, I) do r, i | ||
374 | @inbounds getindex(r, i) | ||
375 | end | ||
376 | CartesianIndices(indices) | ||
377 | end | ||
378 | @propagate_inbounds function Base.getindex(iter::CartesianIndices{N}, | ||
379 | C::CartesianIndices{N}) where {N} | ||
380 | getindex(iter, C.indices...) | ||
381 | end | ||
382 | @inline Base.getindex(iter::CartesianIndices{0}, ::CartesianIndices{0}) = iter | ||
383 | |||
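
A sketch of the range-indexing behavior above: indexing a `CartesianIndices` with ranges or colons indexes each component range, so the result is again a `CartesianIndices` rather than an `Array` of indices.

```julia
R = CartesianIndices((1:3, 1:4))
R[2:3, 1:2]       # CartesianIndices((2:3, 1:2))
view(R, 2:3, :)   # CartesianIndices((2:3, 1:4)): no SubArray wrapper is needed
```
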
384 | # If dimensions permit, we may index into a CartesianIndices directly instead of constructing a SubArray wrapper | ||
385 | @propagate_inbounds function Base.view(c::CartesianIndices{N}, r::Vararg{Union{OrdinalRange{<:Integer, <:Integer}, Colon},N}) where {N} | ||
386 | getindex(c, r...) | ||
387 | end | ||
388 | @propagate_inbounds function Base.view(c::CartesianIndices{N}, C::CartesianIndices{N}) where {N} | ||
389 | getindex(c, C) | ||
390 | end | ||
391 | |||
392 | eachindex(::IndexCartesian, A::AbstractArray) = CartesianIndices(axes(A)) | ||
393 | |||
394 | @inline function eachindex(::IndexCartesian, A::AbstractArray, B::AbstractArray...) | ||
395 | axsA = axes(A) | ||
396 | Base._all_match_first(axes, axsA, B...) || Base.throw_eachindex_mismatch_indices(IndexCartesian(), axes(A), axes.(B)...) | ||
397 | CartesianIndices(axsA) | ||
398 | end | ||
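
A brief usage sketch for the `eachindex` methods above (illustrative REPL calls; the mismatch case throws):

```julia
A, B = zeros(2, 3), zeros(2, 3)
eachindex(IndexCartesian(), A)       # CartesianIndices((2, 3))
eachindex(IndexCartesian(), A, B)    # axes agree, so CartesianIndices((2, 3))
# eachindex(IndexCartesian(), A, zeros(3, 2))  # would error: mismatched axes
```
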
399 | |||
400 | @inline function iterate(iter::CartesianIndices) | ||
401 | iterfirst = first(iter) | ||
402 | if !all(map(in, iterfirst.I, iter.indices)) | ||
403 | return nothing | 16 (6 %) | 16 (6 %) |
404 | end | ||
405 | iterfirst, iterfirst | ||
406 | end | ||
407 | @inline function iterate(iter::CartesianIndices, state) | ||
408 | valid, I = __inc(state.I, iter.indices) | | 3 (1 %) |
409 | valid || return nothing | ||
410 | return CartesianIndex(I...), CartesianIndex(I...) | ||
411 | end | ||
412 | |||
413 | # increment & carry | ||
414 | @inline function inc(state, indices) | ||
415 | _, I = __inc(state, indices) | ||
416 | return CartesianIndex(I...) | ||
417 | end | ||
418 | |||
419 | # Unlike ordinary ranges, CartesianIndices continues the iteration in the next column when the | ||
420 | # current column is consumed. The implementation is written recursively to achieve this. | ||
421 | # Because `iterate` returns `Union{Nothing, Tuple}`, we explicitly pass a `valid` flag to eliminate | ||
422 | # the type instability inside the core `__inc` logic; this gives better runtime performance. | ||
423 | __inc(::Tuple{}, ::Tuple{}) = false, () | ||
424 | @inline function __inc(state::Tuple{Int}, indices::Tuple{OrdinalRangeInt}) | ||
425 | rng = indices[1] | ||
426 | I = state[1] + step(rng) | ||
427 | valid = __is_valid_range(I, rng) && state[1] != last(rng) | ||
428 | return valid, (I, ) | ||
429 | end | ||
430 | @inline function __inc(state::Tuple{Int,Int,Vararg{Int}}, indices::Tuple{OrdinalRangeInt,OrdinalRangeInt,Vararg{OrdinalRangeInt}}) | ||
431 | rng = indices[1] | ||
432 | I = state[1] + step(rng) | ||
433 | if __is_valid_range(I, rng) && state[1] != last(rng) | | 3 (1 %) |
434 | return true, (I, tail(state)...) | ||
435 | end | ||
436 | valid, I = __inc(tail(state), tail(indices)) | ||
437 | return valid, (first(rng), I...) | ||
438 | end | ||
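
To illustrate the increment-and-carry logic above, here is a sketch that calls the internal helpers through their `Base.IteratorsMD` names (internal API, shown only for illustration):

```julia
inds = (Base.OneTo(2), Base.OneTo(3))
Base.IteratorsMD.inc((1, 1), inds)    # CartesianIndex(2, 1): step within the first column
Base.IteratorsMD.inc((2, 1), inds)    # CartesianIndex(1, 2): carry into the next column
Base.IteratorsMD.__inc((2, 3), inds)  # (false, (1, 4)): the iterator is exhausted
```
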
439 | |||
440 | @inline __is_valid_range(I, rng::AbstractUnitRange) = I in rng | | 3 (1 %) |
441 | @inline function __is_valid_range(I, rng::OrdinalRange) | ||
442 | if step(rng) > 0 | ||
443 | lo, hi = first(rng), last(rng) | ||
444 | else | ||
445 | lo, hi = last(rng), first(rng) | ||
446 | end | ||
447 | lo <= I <= hi | ||
448 | end | ||
449 | |||
450 | # 0-d cartesian ranges are special-cased to iterate once and only once | ||
451 | iterate(iter::CartesianIndices{0}, done=false) = done ? nothing : (CartesianIndex(), true) | ||
452 | |||
453 | size(iter::CartesianIndices) = map(length, iter.indices) | ||
454 | |||
455 | length(iter::CartesianIndices) = prod(size(iter)) | ||
456 | |||
457 | # make CartesianIndices a multidimensional range | ||
458 | Base.step(iter::CartesianIndices) = CartesianIndex(map(step, iter.indices)) | ||
459 | |||
460 | first(iter::CartesianIndices) = CartesianIndex(map(first, iter.indices)) | ||
461 | last(iter::CartesianIndices) = CartesianIndex(map(last, iter.indices)) | ||
462 | |||
463 | # When used as indices themselves, CartesianIndices can simply become its tuple of ranges | ||
464 | _to_indices1(A, inds, I1::CartesianIndices) = map(Fix1(to_index, A), I1.indices) | ||
465 | _cutdim(inds::Tuple, I1::CartesianIndices) = split(inds, Val(ndims(I1)))[2] | ||
466 | |||
467 | # but preserve CartesianIndices{0} as they consume a dimension. | ||
468 | _to_indices1(A, inds, I1::CartesianIndices{0}) = (I1,) | ||
469 | |||
470 | @inline in(i::CartesianIndex, r::CartesianIndices) = false | ||
471 | @inline in(i::CartesianIndex{N}, r::CartesianIndices{N}) where {N} = all(map(in, i.I, r.indices)) | ||
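
Membership tests above are componentwise; a quick sketch:

```julia
R = CartesianIndices((2:3, 1:3))
CartesianIndex(2, 2) in R   # true
CartesianIndex(1, 1) in R   # false: 1 is not in 2:3
CartesianIndex(1) in R      # false: dimension mismatch falls back to the generic method
```
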
472 | |||
473 | simd_outer_range(iter::CartesianIndices{0}) = iter | ||
474 | function simd_outer_range(iter::CartesianIndices) | ||
475 | CartesianIndices(tail(iter.indices)) | ||
476 | end | ||
477 | |||
478 | simd_inner_length(iter::CartesianIndices{0}, ::CartesianIndex) = 1 | ||
479 | simd_inner_length(iter::CartesianIndices, I::CartesianIndex) = Base.length(iter.indices[1]) | ||
480 | |||
481 | simd_index(iter::CartesianIndices{0}, ::CartesianIndex, I1::Int) = first(iter) | ||
482 | @propagate_inbounds simd_index(iter::CartesianIndices, Ilast::CartesianIndex, I1::Int) = | ||
483 | CartesianIndex(iter.indices[1][I1+firstindex(iter.indices[1])], Ilast) | ||
484 | |||
485 | # Split out the first N elements of a tuple | ||
486 | @inline function split(t, V::Val) | ||
487 | ref = ntuple(Returns(true), V) # create a reference tuple of length N | ||
488 | _split1(t, ref), _splitrest(t, ref) | ||
489 | end | ||
490 | @inline _split1(t, ref) = (t[1], _split1(tail(t), tail(ref))...) | ||
491 | @inline _splitrest(t, ref) = _splitrest(tail(t), tail(ref)) | ||
492 | # exit either when we've exhausted the input or reference tuple | ||
493 | _split1(::Tuple{}, ::Tuple{}) = () | ||
494 | _split1(::Tuple{}, ref) = () | ||
495 | _split1(t, ::Tuple{}) = () | ||
496 | _splitrest(::Tuple{}, ::Tuple{}) = () | ||
497 | _splitrest(t, ::Tuple{}) = t | ||
498 | _splitrest(::Tuple{}, ref) = () | ||
499 | |||
500 | @inline function split(I::CartesianIndex, V::Val) | ||
501 | i, j = split(I.I, V) | ||
502 | CartesianIndex(i), CartesianIndex(j) | ||
503 | end | ||
504 | function split(R::CartesianIndices, V::Val) | ||
505 | i, j = split(R.indices, V) | ||
506 | CartesianIndices(i), CartesianIndices(j) | ||
507 | end | ||
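
The `split` helpers above partition a tuple, `CartesianIndex`, or `CartesianIndices` into its leading `N` components and the remainder. A sketch using the internal names (illustration only):

```julia
Base.IteratorsMD.split((1, 2, 3, 4), Val(2))                 # ((1, 2), (3, 4))
Base.IteratorsMD.split(CartesianIndex(1, 2, 3), Val(2))      # (CartesianIndex(1, 2), CartesianIndex(3))
Base.IteratorsMD.split(CartesianIndices((2, 3, 4)), Val(1))  # (CartesianIndices((2,)), CartesianIndices((3, 4)))
```
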
508 | |||
509 | # reversed CartesianIndices iteration | ||
510 | @inline function Base._reverse(iter::CartesianIndices, ::Colon) | ||
511 | CartesianIndices(reverse.(iter.indices)) | ||
512 | end | ||
513 | |||
514 | Base.@constprop :aggressive function Base._reverse(iter::CartesianIndices, dim::Integer) | ||
515 | 1 <= dim <= ndims(iter) || throw(ArgumentError(Base.LazyString("invalid dimension ", dim, " in reverse"))) | ||
516 | ndims(iter) == 1 && return Base._reverse(iter, :) | ||
517 | indices = iter.indices | ||
518 | return CartesianIndices(Base.setindex(indices, reverse(indices[dim]), dim)) | ||
519 | end | ||
520 | |||
521 | Base.@constprop :aggressive function Base._reverse(iter::CartesianIndices, dims::Tuple{Vararg{Integer}}) | ||
522 | indices = iter.indices | ||
523 | # use `sum` to force const fold | ||
524 | dimrev = ntuple(i -> sum(==(i), dims; init = 0) == 1, Val(length(indices))) | ||
525 | length(dims) == sum(dimrev) || throw(ArgumentError(Base.LazyString("invalid dimensions ", dims, " in reverse"))) | ||
526 | length(dims) == length(indices) && return Base._reverse(iter, :) | ||
527 | indices′ = map((i, f) -> f ? (@noinline reverse(i)) : i, indices, dimrev) | ||
528 | return CartesianIndices(indices′) | ||
529 | end | ||
530 | |||
531 | # fix ambiguity with array.jl: | ||
532 | Base._reverse(iter::CartesianIndices{1}, dims::Tuple{Integer}) = | ||
533 | Base._reverse(iter, first(dims)) | ||
534 | |||
535 | @inline function iterate(r::Reverse{<:CartesianIndices}) | ||
536 | iterfirst = last(r.itr) | ||
537 | if !all(map(in, iterfirst.I, r.itr.indices)) | ||
538 | return nothing | ||
539 | end | ||
540 | iterfirst, iterfirst | ||
541 | end | ||
542 | @inline function iterate(r::Reverse{<:CartesianIndices}, state) | ||
543 | valid, I = __dec(state.I, r.itr.indices) | ||
544 | valid || return nothing | ||
545 | return CartesianIndex(I...), CartesianIndex(I...) | ||
546 | end | ||
547 | |||
548 | # decrement & carry | ||
549 | @inline function dec(state, indices) | ||
550 | _, I = __dec(state, indices) | ||
551 | return CartesianIndex(I...) | ||
552 | end | ||
553 | |||
554 | # decrement post check to avoid integer overflow | ||
555 | @inline __dec(::Tuple{}, ::Tuple{}) = false, () | ||
556 | @inline function __dec(state::Tuple{Int}, indices::Tuple{OrdinalRangeInt}) | ||
557 | rng = indices[1] | ||
558 | I = state[1] - step(rng) | ||
559 | valid = __is_valid_range(I, rng) && state[1] != first(rng) | ||
560 | return valid, (I,) | ||
561 | end | ||
562 | @inline function __dec(state::Tuple{Int,Int,Vararg{Int}}, indices::Tuple{OrdinalRangeInt,OrdinalRangeInt,Vararg{OrdinalRangeInt}}) | ||
563 | rng = indices[1] | ||
564 | I = state[1] - step(rng) | ||
565 | if __is_valid_range(I, rng) && state[1] != first(rng) | ||
566 | return true, (I, tail(state)...) | ||
567 | end | ||
568 | valid, I = __dec(tail(state), tail(indices)) | ||
569 | return valid, (last(rng), I...) | ||
570 | end | ||
571 | |||
572 | # 0-d cartesian ranges are special-cased to iterate once and only once | ||
573 | iterate(iter::Reverse{<:CartesianIndices{0}}, state=false) = state ? nothing : (CartesianIndex(), true) | ||
574 | |||
575 | function Base.LinearIndices(inds::CartesianIndices{N,R}) where {N,R<:NTuple{N, AbstractUnitRange}} | ||
576 | LinearIndices{N,R}(inds.indices) | ||
577 | end | ||
578 | function Base.LinearIndices(inds::CartesianIndices) | ||
579 | indices = inds.indices | ||
580 | if all(x->step(x)==1, indices) | ||
581 | indices = map(rng->first(rng):last(rng), indices) | ||
582 | LinearIndices{length(indices), typeof(indices)}(indices) | ||
583 | else | ||
584 | # Because StepRange 1:2:4 === 1:2:3, the original size information is lost | ||
585 | # and we cannot calculate the correct linear indices when the steps are not 1. | ||
586 | throw(ArgumentError("LinearIndices for $(typeof(inds)) with non-1 step size is not yet supported.")) | ||
587 | end | ||
588 | end | ||
589 | |||
590 | # This is currently needed because converting to LinearIndices is only available when steps are | ||
591 | # all 1 | ||
592 | # NOTE: this is only a temporary patch and could possibly be removed once StepRange support for | ||
593 | # LinearIndices is done | ||
594 | function Base.collect(inds::CartesianIndices{N, R}) where {N,R<:NTuple{N, AbstractUnitRange}} | ||
595 | Base._collect_indices(axes(inds), inds) | ||
596 | end | ||
597 | function Base.collect(inds::CartesianIndices) | ||
598 | dest = Array{eltype(inds), ndims(inds)}(undef, size(inds)) | ||
599 | i = 0 | ||
600 | @inbounds for a in inds | ||
601 | dest[i+=1] = a | ||
602 | end | ||
603 | dest | ||
604 | end | ||
605 | |||
606 | # array operations | ||
607 | Base.intersect(a::CartesianIndices{N}, b::CartesianIndices{N}) where N = | ||
608 | CartesianIndices(intersect.(a.indices, b.indices)) | ||
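
The elementwise `intersect` above returns the overlap region of two index ranges; a short sketch:

```julia
a = CartesianIndices((1:3, 1:4))
b = CartesianIndices((2:5, 0:2))
intersect(a, b)   # CartesianIndices((2:3, 1:2))
```
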
609 | |||
610 | # Views of reshaped CartesianIndices are used for partitions — ensure these are fast | ||
611 | const CartesianPartition{T<:CartesianIndex, P<:CartesianIndices, R<:ReshapedArray{T,1,P}} = SubArray{T,1,R,<:Tuple{AbstractUnitRange{Int}},false} | ||
612 | eltype(::Type{PartitionIterator{T}}) where {T<:ReshapedArrayLF} = SubArray{eltype(T), 1, T, Tuple{UnitRange{Int}}, true} | ||
613 | eltype(::Type{PartitionIterator{T}}) where {T<:ReshapedArray} = SubArray{eltype(T), 1, T, Tuple{UnitRange{Int}}, false} | ||
614 | Iterators.IteratorEltype(::Type{<:PartitionIterator{T}}) where {T<:ReshapedArray} = Iterators.IteratorEltype(T) | ||
615 | |||
616 | eltype(::Type{PartitionIterator{T}}) where {T<:OneTo} = UnitRange{eltype(T)} | ||
617 | eltype(::Type{PartitionIterator{T}}) where {T<:Union{UnitRange, StepRange, StepRangeLen, LinRange}} = T | ||
618 | Iterators.IteratorEltype(::Type{<:PartitionIterator{T}}) where {T<:Union{OneTo, UnitRange, StepRange, StepRangeLen, LinRange}} = Iterators.IteratorEltype(T) | ||
619 | |||
620 | @inline function iterate(iter::CartesianPartition) | ||
621 | isempty(iter) && return nothing | ||
622 | f = first(iter) | ||
623 | return (f, (f, 1)) | ||
624 | end | ||
625 | @inline function iterate(iter::CartesianPartition, (state, n)) | ||
626 | n >= length(iter) && return nothing | ||
627 | I = IteratorsMD.inc(state.I, iter.parent.parent.indices) | ||
628 | return I, (I, n+1) | ||
629 | end | ||
630 | |||
631 | @inline function simd_outer_range(iter::CartesianPartition) | ||
632 | # In general, the Cartesian Partition might start and stop in the middle of the outer | ||
633 | # dimensions — thus the outer range of a CartesianPartition is itself a | ||
634 | # CartesianPartition. | ||
635 | mi = iter.parent.mi | ||
636 | ci = iter.parent.parent | ||
637 | ax, ax1 = axes(ci), Base.axes1(ci) | ||
638 | subs = Base.ind2sub_rs(ax, mi, first(iter.indices[1])) | ||
639 | vl, fl = Base._sub2ind(tail(ax), tail(subs)...), subs[1] | ||
640 | vr, fr = divrem(last(iter.indices[1]) - 1, mi[end]) .+ (1, first(ax1)) | ||
641 | oci = CartesianIndices(tail(ci.indices)) | ||
642 | # A fake CartesianPartition to reuse the outer iterate fallback | ||
643 | outer = @inbounds view(ReshapedArray(oci, (length(oci),), mi), vl:vr) | ||
644 | init = @inbounds dec(oci[tail(subs)...].I, oci.indices) # real init state | ||
645 | # Use Generator to make inner loop branchless | ||
646 | @inline function skip_len_I(i::Int, I::CartesianIndex) | ||
647 | l = i == 1 ? fl : first(ax1) | ||
648 | r = i == length(outer) ? fr : last(ax1) | ||
649 | l - first(ax1), r - l + 1, I | ||
650 | end | ||
651 | (skip_len_I(i, I) for (i, I) in Iterators.enumerate(Iterators.rest(outer, (init, 0)))) | ||
652 | end | ||
653 | @inline function simd_outer_range(iter::CartesianPartition{CartesianIndex{2}}) | ||
654 | # But for two-dimensional Partitions the above is just a simple one-dimensional range | ||
655 | # over the second dimension; we don't need to worry about non-rectangular staggers in | ||
656 | # higher dimensions. | ||
657 | mi = iter.parent.mi | ||
658 | ci = iter.parent.parent | ||
659 | ax, ax1 = axes(ci), Base.axes1(ci) | ||
660 | fl, vl = Base.ind2sub_rs(ax, mi, first(iter.indices[1])) | ||
661 | fr, vr = Base.ind2sub_rs(ax, mi, last(iter.indices[1])) | ||
662 | outer = @inbounds CartesianIndices((ci.indices[2][vl:vr],)) | ||
663 | # Use Generator to make inner loop branchless | ||
664 | @inline function skip_len_I(I::CartesianIndex{1}) | ||
665 | l = I == first(outer) ? fl : first(ax1) | ||
666 | r = I == last(outer) ? fr : last(ax1) | ||
667 | l - first(ax1), r - l + 1, I | ||
668 | end | ||
669 | (skip_len_I(I) for I in outer) | ||
670 | end | ||
671 | @inline simd_inner_length(iter::CartesianPartition, (_, len, _)::Tuple{Int,Int,CartesianIndex}) = len | ||
672 | @propagate_inbounds simd_index(iter::CartesianPartition, (skip, _, I)::Tuple{Int,Int,CartesianIndex}, n::Int) = | ||
673 | simd_index(iter.parent.parent, I, n + skip) | ||
674 | end # IteratorsMD | ||
675 | |||
676 | |||
677 | using .IteratorsMD | ||
678 | |||
679 | ## Bounds-checking with CartesianIndex | ||
680 | # Disallow linear indexing with CartesianIndex | ||
681 | function checkbounds(::Type{Bool}, A::AbstractArray, i::Union{CartesianIndex, AbstractArray{<:CartesianIndex}}) | ||
682 | @inline | ||
683 | checkbounds_indices(Bool, axes(A), (i,)) | ||
684 | end | ||
685 | |||
686 | @inline checkbounds_indices(::Type{Bool}, ::Tuple{}, I::Tuple{CartesianIndex,Vararg{Any}}) = | ||
687 | checkbounds_indices(Bool, (), (I[1].I..., tail(I)...)) | ||
688 | @inline checkbounds_indices(::Type{Bool}, IA::Tuple{Any}, I::Tuple{CartesianIndex,Vararg{Any}}) = | ||
689 | checkbounds_indices(Bool, IA, (I[1].I..., tail(I)...)) | ||
690 | @inline checkbounds_indices(::Type{Bool}, IA::Tuple, I::Tuple{CartesianIndex,Vararg{Any}}) = | ||
691 | checkbounds_indices(Bool, IA, (I[1].I..., tail(I)...)) | ||
692 | |||
693 | # Indexing into Array with mixtures of Integers and CartesianIndices is | ||
694 | # extremely performance-sensitive. While the abstract fallbacks support this, | ||
695 | # codegen has extra support for SIMDification that sub2ind doesn't (yet) support | ||
696 | @propagate_inbounds getindex(A::Array, i1::Union{Integer, CartesianIndex}, I::Union{Integer, CartesianIndex}...) = | | 24 (8 %) |
697 | A[to_indices(A, (i1, I...))...] | ||
698 | @propagate_inbounds setindex!(A::Array, v, i1::Union{Integer, CartesianIndex}, I::Union{Integer, CartesianIndex}...) = | ||
699 | (A[to_indices(A, (i1, I...))...] = v; A) | ||
700 | |||
701 | # Support indexing with an array of CartesianIndex{N}s | ||
702 | # Here we try to consume N of the indices (if there are that many available) | ||
703 | # The first two simply handle ambiguities | ||
704 | @inline function checkbounds_indices(::Type{Bool}, ::Tuple{}, | ||
705 | I::Tuple{AbstractArray{CartesianIndex{N}},Vararg{Any}}) where N | ||
706 | checkindex(Bool, (), I[1]) & checkbounds_indices(Bool, (), tail(I)) | ||
707 | end | ||
708 | @inline function checkbounds_indices(::Type{Bool}, IA::Tuple{Any}, | ||
709 | I::Tuple{AbstractArray{CartesianIndex{0}},Vararg{Any}}) | ||
710 | checkbounds_indices(Bool, IA, tail(I)) | ||
711 | end | ||
712 | @inline function checkbounds_indices(::Type{Bool}, IA::Tuple{Any}, | ||
713 | I::Tuple{AbstractArray{CartesianIndex{N}},Vararg{Any}}) where N | ||
714 | checkindex(Bool, IA, I[1]) & checkbounds_indices(Bool, (), tail(I)) | ||
715 | end | ||
716 | @inline function checkbounds_indices(::Type{Bool}, IA::Tuple, | ||
717 | I::Tuple{AbstractArray{CartesianIndex{N}},Vararg{Any}}) where N | ||
718 | IA1, IArest = IteratorsMD.split(IA, Val(N)) | ||
719 | checkindex(Bool, IA1, I[1]) & checkbounds_indices(Bool, IArest, tail(I)) | ||
720 | end | ||
721 | |||
722 | |||
723 | @inline function checkbounds_indices(::Type{Bool}, IA::Tuple{}, | ||
724 | I::Tuple{AbstractArray{Bool,N},Vararg{Any}}) where N | ||
725 | return checkbounds_indices(Bool, IA, (LogicalIndex(I[1]), tail(I)...)) | ||
726 | end | ||
727 | @inline function checkbounds_indices(::Type{Bool}, IA::Tuple, | ||
728 | I::Tuple{AbstractArray{Bool,N},Vararg{Any}}) where N | ||
729 | return checkbounds_indices(Bool, IA, (LogicalIndex(I[1]), tail(I)...)) | ||
730 | end | ||
731 | |||
732 | function checkindex(::Type{Bool}, inds::Tuple, I::AbstractArray{<:CartesianIndex}) | ||
733 | b = true | ||
734 | for i in I | ||
735 | b &= checkbounds_indices(Bool, inds, (i,)) | ||
736 | end | ||
737 | b | ||
738 | end | ||
739 | checkindex(::Type{Bool}, inds::Tuple, I::CartesianIndices) = all(checkindex.(Bool, inds, I.indices)) | ||
740 | |||
741 | # combined count of all indices, including CartesianIndex and | ||
742 | # AbstractArray{CartesianIndex} | ||
743 | # rather than returning N, it returns an NTuple{N,Bool} so the result is inferable | ||
744 | @inline index_ndims(i1, I...) = (true, index_ndims(I...)...) | ||
745 | @inline function index_ndims(i1::CartesianIndex, I...) | ||
746 | (map(Returns(true), i1.I)..., index_ndims(I...)...) | ||
747 | end | ||
748 | @inline function index_ndims(i1::AbstractArray{CartesianIndex{N}}, I...) where N | ||
749 | (ntuple(Returns(true), Val(N))..., index_ndims(I...)...) | ||
750 | end | ||
751 | index_ndims() = () | ||
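
A sketch of the dimension-count bookkeeping above (internal helper, reached via `Base` here purely for illustration): each index contributes one `true` per dimension it consumes, so the total dimensionality is inferable from the argument types alone.

```julia
Base.index_ndims(1, 2)                     # (true, true)
Base.index_ndims(CartesianIndex(1, 2), 3)  # (true, true, true)
Base.index_ndims([CartesianIndex(1, 1)])   # (true, true): an array of CartesianIndex{2}
```
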
752 | |||
753 | # combined dimensionality of all indices | ||
754 | # rather than returning N, it returns an NTuple{N,Bool} so the result is inferable | ||
755 | @inline index_dimsum(i1, I...) = (index_dimsum(I...)...,) | ||
756 | @inline index_dimsum(::Colon, I...) = (true, index_dimsum(I...)...) | ||
757 | @inline index_dimsum(::AbstractArray{Bool}, I...) = (true, index_dimsum(I...)...) | ||
758 | @inline function index_dimsum(::AbstractArray{<:Any,N}, I...) where N | ||
759 | (ntuple(Returns(true), Val(N))..., index_dimsum(I...)...) | ||
760 | end | ||
761 | index_dimsum() = () | ||
762 | |||
763 | # Recursively compute the lengths of a list of indices, without dropping scalars | ||
764 | index_lengths() = () | ||
765 | @inline index_lengths(::Real, rest...) = (1, index_lengths(rest...)...) | ||
766 | @inline index_lengths(A::AbstractArray, rest...) = (length(A), index_lengths(rest...)...) | ||
767 | |||
768 | # shape of array to create for getindex() with indices I, dropping scalars | ||
769 | # returns a Tuple{Vararg{AbstractUnitRange}} of indices | ||
770 | index_shape() = () | ||
771 | @inline index_shape(::Real, rest...) = index_shape(rest...) | ||
772 | @inline index_shape(A::AbstractArray, rest...) = (axes(A)..., index_shape(rest...)...) | ||
773 | |||
774 | """ | ||
775 | LogicalIndex(mask) | ||
776 | |||
777 | The `LogicalIndex` type is a special vector that simply contains all indices `I` | ||
778 | where `mask[I]` is true. This specialized type does not support indexing | ||
779 | directly, as doing so would require O(n) lookup time. An `AbstractArray{Bool}` is | ||
780 | wrapped with `LogicalIndex` upon calling [`to_indices`](@ref). | ||
781 | """ | ||
782 | struct LogicalIndex{T, A<:AbstractArray{Bool}} <: AbstractVector{T} | ||
783 | mask::A | ||
784 | sum::Int | ||
785 | LogicalIndex{T,A}(mask::A) where {T,A<:AbstractArray{Bool}} = new(mask, count(mask)) | ||
786 | end | ||
787 | LogicalIndex(mask::AbstractVector{Bool}) = LogicalIndex{Int, typeof(mask)}(mask) | ||
788 | LogicalIndex(mask::AbstractArray{Bool, N}) where {N} = LogicalIndex{CartesianIndex{N}, typeof(mask)}(mask) | ||
789 | LogicalIndex{Int}(mask::AbstractArray) = LogicalIndex{Int, typeof(mask)}(mask) | ||
790 | size(L::LogicalIndex) = (L.sum,) | ||
791 | length(L::LogicalIndex) = L.sum | ||
792 | collect(L::LogicalIndex) = [i for i in L] | ||
793 | show(io::IO, r::LogicalIndex) = print(io,collect(r)) | ||
794 | print_array(io::IO, X::LogicalIndex) = print_array(io, collect(X)) | ||
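
A sketch of `LogicalIndex` in use (an internal type, constructed via `Base` here only for illustration): a vector mask yields linear indices, a higher-dimensional mask yields `CartesianIndex` values, and the length is the precomputed `count(mask)`.

```julia
mask = [true false; false true]
L = Base.LogicalIndex(mask)
collect(L)                                       # [CartesianIndex(1, 1), CartesianIndex(2, 2)]
collect(Base.LogicalIndex([false, true, true]))  # [2, 3]
length(L)                                        # 2
```
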
795 | # Iteration over LogicalIndex is very performance-critical, but it also must | ||
796 | # support arbitrary AbstractArray{Bool}s with both Int and CartesianIndex. | ||
797 | # Thus the iteration state contains an index iterator and its state. We also | ||
798 | # keep track of the count of elements since we already know how many there | ||
799 | # should be -- this way we don't need to look at future indices to check done. | ||
800 | @inline function iterate(L::LogicalIndex{Int}) | ||
801 | r = LinearIndices(L.mask) | ||
802 | iterate(L, (1, r)) | ||
803 | end | ||
804 | @inline function iterate(L::LogicalIndex{<:CartesianIndex}) | ||
805 | r = CartesianIndices(axes(L.mask)) | ||
806 | iterate(L, (1, r)) | ||
807 | end | ||
808 | @propagate_inbounds function iterate(L::LogicalIndex, s) | ||
809 | # We're looking for the n-th true element, using iterator r at state i | ||
810 | n = s[1] | ||
811 | n > length(L) && return nothing | ||
812 | # unroll once to help inference, cf. issue #29418 | ||
813 | idx, i = iterate(tail(s)...) | ||
814 | s = (n+1, s[2], i) | ||
815 | L.mask[idx] && return (idx, s) | ||
816 | while true | ||
817 | idx, i = iterate(tail(s)...) | ||
818 | s = (n+1, s[2], i) | ||
819 | L.mask[idx] && return (idx, s) | ||
820 | end | ||
821 | end | ||
822 | # When wrapping a BitArray, lean heavily upon its internals. | ||
823 | @inline function iterate(L::LogicalIndex{Int,<:BitArray}) | ||
824 | L.sum == 0 && return nothing | ||
825 | Bc = L.mask.chunks | ||
826 | return iterate(L, (1, 1, (), @inbounds Bc[1])) | ||
827 | end | ||
828 | @inline function iterate(L::LogicalIndex{<:CartesianIndex,<:BitArray}) | ||
829 | L.sum == 0 && return nothing | ||
830 | Bc = L.mask.chunks | ||
831 | irest = ntuple(one, ndims(L.mask)-1) | ||
832 | return iterate(L, (1, 1, irest, @inbounds Bc[1])) | ||
833 | end | ||
834 | @inline function iterate(L::LogicalIndex{<:Any,<:BitArray}, (i1, Bi, irest, c)) | ||
835 | Bc = L.mask.chunks | ||
836 | while c == 0 | ||
837 | Bi >= length(Bc) && return nothing | ||
838 | i1 += 64 | ||
839 | @inbounds c = Bc[Bi+=1] | ||
840 | end | ||
841 | tz = trailing_zeros(c) | ||
842 | c = _blsr(c) | ||
843 | i1, irest = _overflowind(i1 + tz, irest, size(L.mask)) | ||
844 | return eltype(L)(i1, irest...), (i1 - tz, Bi, irest, c) | ||
845 | end | ||
846 | |||
847 | @inline checkbounds(::Type{Bool}, A::AbstractArray, I::LogicalIndex{<:Any,<:AbstractArray{Bool,1}}) = | ||
848 | eachindex(IndexLinear(), A) == eachindex(IndexLinear(), I.mask) | ||
849 | @inline checkbounds(::Type{Bool}, A::AbstractArray, I::LogicalIndex) = axes(A) == axes(I.mask) | ||
850 | @inline checkindex(::Type{Bool}, indx::AbstractUnitRange, I::LogicalIndex) = (indx,) == axes(I.mask) | ||
851 | checkindex(::Type{Bool}, inds::Tuple, I::LogicalIndex) = checkbounds_indices(Bool, inds, axes(I.mask)) | ||
852 | |||
853 | ensure_indexable(I::Tuple{}) = () | ||
854 | @inline ensure_indexable(I::Tuple{Any, Vararg{Any}}) = (I[1], ensure_indexable(tail(I))...) | ||
855 | @inline ensure_indexable(I::Tuple{LogicalIndex, Vararg{Any}}) = (collect(I[1]), ensure_indexable(tail(I))...) | ||
856 | |||
857 | # In simple cases, we know that we don't need to use axes(A). Optimize those | ||
858 | # until Julia gets smart enough to elide the call on its own: | ||
859 | @inline to_indices(A, I::Tuple{Vararg{Union{Integer, CartesianIndex}}}) = to_indices(A, (), I) | ||
860 | # But some index types require more context spanning multiple indices | ||
861 | # CartesianIndex is unfolded outside the inner to_indices for better inference | ||
862 | _to_indices1(A, inds, I1::CartesianIndex) = map(Fix1(to_index, A), I1.I) | ||
863 | _cutdim(inds, I1::CartesianIndex) = IteratorsMD.split(inds, Val(length(I1)))[2] | ||
864 | # For arrays of CartesianIndex, we just skip the appropriate number of inds | ||
865 | _cutdim(inds, I1::AbstractArray{CartesianIndex{N}}) where {N} = IteratorsMD.split(inds, Val(N))[2] | ||
866 | # And boolean arrays behave similarly; they also skip their number of dimensions | ||
867 | _cutdim(inds::Tuple, I1::AbstractArray{Bool}) = IteratorsMD.split(inds, Val(ndims(I1)))[2] | ||
868 | # As an optimization, we allow trailing Array{Bool} and BitArray to be linear over trailing dimensions | ||
869 | @inline to_indices(A, inds, I::Tuple{Union{Array{Bool,N}, BitArray{N}}}) where {N} = | ||
870 | (_maybe_linear_logical_index(IndexStyle(A), A, I[1]),) | ||
871 | _maybe_linear_logical_index(::IndexStyle, A, i) = to_index(A, i) | ||
872 | _maybe_linear_logical_index(::IndexLinear, A, i) = LogicalIndex{Int}(i) | ||
873 | |||
874 | # Colons get converted to slices by `uncolon` | ||
875 | _to_indices1(A, inds, I1::Colon) = (uncolon(inds),) | ||
876 | |||
877 | uncolon(::Tuple{}) = Slice(OneTo(1)) | ||
878 | uncolon(inds::Tuple) = Slice(inds[1]) | ||
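
A sketch of how `to_indices` resolves colons through `uncolon` above, and unfolds a `CartesianIndex` into its components (results shown as comments):

```julia
A = zeros(2, 3)
to_indices(A, (:, 2))                    # (Base.Slice(Base.OneTo(2)), 2)
to_indices(A, (CartesianIndex(1, 2),))   # (1, 2)
```
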
879 | |||
880 | ### From abstractarray.jl: Internal multidimensional indexing definitions ### | ||
881 | getindex(x::Union{Number,AbstractChar}, ::CartesianIndex{0}) = x | ||
882 | getindex(t::Tuple, i::CartesianIndex{1}) = getindex(t, i.I[1]) | ||
883 | |||
884 | # These are not defined directly on getindex to avoid | ||
885 | # ambiguities for AbstractArray subtypes. See the note in abstractarray.jl | ||
886 | |||
887 | @inline function _getindex(l::IndexStyle, A::AbstractArray, I::Union{Real, AbstractArray}...) | ||
888 | @boundscheck checkbounds(A, I...) | ||
889 | return _unsafe_getindex(l, _maybe_reshape(l, A, I...), I...) | ||
890 | end | ||
891 | # But we can speed up IndexCartesian arrays by reshaping them to the appropriate dimensionality: | ||
892 | _maybe_reshape(::IndexLinear, A::AbstractArray, I...) = A | ||
893 | _maybe_reshape(::IndexCartesian, A::AbstractVector, I...) = A | ||
894 | @inline _maybe_reshape(::IndexCartesian, A::AbstractArray, I...) = __maybe_reshape(A, index_ndims(I...)) | ||
895 | @inline __maybe_reshape(A::AbstractArray{T,N}, ::NTuple{N,Any}) where {T,N} = A | ||
896 | @inline __maybe_reshape(A::AbstractArray, ::NTuple{N,Any}) where {N} = reshape(A, Val(N)) | ||
897 | |||
898 | function _unsafe_getindex(::IndexStyle, A::AbstractArray, I::Vararg{Union{Real, AbstractArray}, N}) where N | ||
899 | # This is specifically not inlined to prevent excessive allocations in type unstable code | ||
900 | shape = index_shape(I...) | ||
901 | dest = similar(A, shape) | ||
902 | map(length, axes(dest)) == map(length, shape) || throw_checksize_error(dest, shape) | ||
903 | _unsafe_getindex!(dest, A, I...) # usually a generated function, don't allow it to impact inference result | ||
904 | return dest | ||
905 | end | ||
906 | |||
907 | function _generate_unsafe_getindex!_body(N::Int) | ||
908 | quote | ||
909 | @inline | ||
910 | D = eachindex(dest) | ||
911 | Dy = iterate(D) | ||
912 | @inbounds @nloops $N j d->I[d] begin | ||
913 | # This condition is never hit, but at the moment | ||
914 | # the optimizer is not clever enough to split the union without it | ||
915 | Dy === nothing && return dest | ||
916 | (idx, state) = Dy | ||
917 | dest[idx] = @ncall $N getindex src j | ||
918 | Dy = iterate(D, state) | ||
919 | end | ||
920 | return dest | ||
921 | end | ||
922 | end | ||
923 | |||
924 | # Always index with exactly the indices provided. | ||
925 | @generated function _unsafe_getindex!(dest::AbstractArray, src::AbstractArray, I::Vararg{Union{Real, AbstractArray}, N}) where N | ||
926 | _generate_unsafe_getindex!_body(N) | ||
927 | end | ||
928 | |||
929 | # manually written-out specializations for 1 and 2 arguments to save compile time | ||
930 | @eval function _unsafe_getindex!(dest::AbstractArray, src::AbstractArray, I::Vararg{Union{Real, AbstractArray},1}) | ||
931 | $(_generate_unsafe_getindex!_body(1)) | ||
932 | end | ||
933 | |||
934 | @eval function _unsafe_getindex!(dest::AbstractArray, src::AbstractArray, I::Vararg{Union{Real, AbstractArray},2}) | ||
935 | $(_generate_unsafe_getindex!_body(2)) | ||
936 | end | ||
937 | |||
938 | @noinline throw_checksize_error(A, sz) = throw(DimensionMismatch("output array is the wrong size; expected $sz, got $(size(A))")) | ||
939 | |||
940 | ## setindex! ## | ||
941 | function _setindex!(l::IndexStyle, A::AbstractArray, x, I::Union{Real, AbstractArray}...) | ||
942 | @inline | ||
943 | @boundscheck checkbounds(A, I...) | ||
944 | _unsafe_setindex!(l, _maybe_reshape(l, A, I...), x, I...) | ||
945 | A | ||
946 | end | ||
947 | |||
948 | function _generate_unsafe_setindex!_body(N::Int) | ||
949 | quote | ||
950 | x′ = unalias(A, x) | ||
951 | @nexprs $N d->(I_d = unalias(A, I[d])) | ||
952 | idxlens = @ncall $N index_lengths I | ||
953 | @ncall $N setindex_shape_check x′ (d->idxlens[d]) | ||
954 | Xy = iterate(x′) | ||
955 | @inbounds @nloops $N i d->I_d begin | ||
956 | # This is never reached, but serves as an assumption for | ||
957 | # the optimizer that it does not need to emit error paths | ||
958 | Xy === nothing && break | ||
959 | (val, state) = Xy | ||
960 | @ncall $N setindex! A val i | ||
961 | Xy = iterate(x′, state) | ||
962 | end | ||
963 | A | ||
964 | end | ||
965 | end | ||
966 | |||
967 | @generated function _unsafe_setindex!(::IndexStyle, A::AbstractArray, x, I::Vararg{Union{Real,AbstractArray}, N}) where N | ||
968 | _generate_unsafe_setindex!_body(N) | ||
969 | end | ||
970 | |||
971 | @eval function _unsafe_setindex!(::IndexStyle, A::AbstractArray, x, I::Vararg{Union{Real,AbstractArray},1}) | ||
972 | $(_generate_unsafe_setindex!_body(1)) | ||
973 | end | ||
974 | |||
975 | @eval function _unsafe_setindex!(::IndexStyle, A::AbstractArray, x, I::Vararg{Union{Real,AbstractArray},2}) | ||
976 | $(_generate_unsafe_setindex!_body(2)) | ||
977 | end | ||
978 | |||
979 | diff(a::AbstractVector) = diff(a, dims=1) | ||
980 | |||
981 | """ | ||
982 | diff(A::AbstractVector) | ||
983 | diff(A::AbstractArray; dims::Integer) | ||
984 | |||
985 | Finite difference operator on a vector or a multidimensional array `A`. In the | ||
986 | latter case the dimension to operate on needs to be specified with the `dims` | ||
987 | keyword argument. | ||
988 | |||
989 | !!! compat "Julia 1.1" | ||
990 | `diff` for arrays with dimension higher than 2 requires at least Julia 1.1. | ||
991 | |||
992 | # Examples | ||
993 | ```jldoctest | ||
994 | julia> a = [2 4; 6 16] | ||
995 | 2×2 Matrix{Int64}: | ||
996 | 2 4 | ||
997 | 6 16 | ||
998 | |||
999 | julia> diff(a, dims=2) | ||
1000 | 2×1 Matrix{Int64}: | ||
1001 | 2 | ||
1002 | 10 | ||
1003 | |||
1004 | julia> diff(vec(a)) | ||
1005 | 3-element Vector{Int64}: | ||
1006 | 4 | ||
1007 | -2 | ||
1008 | 12 | ||
1009 | ``` | ||
1010 | """ | ||
1011 | function diff(a::AbstractArray{T,N}; dims::Integer) where {T,N} | ||
1012 | require_one_based_indexing(a) | ||
1013 | 1 <= dims <= N || throw(ArgumentError("dimension $dims out of range (1:$N)")) | ||
1014 | |||
1015 | r = axes(a) | ||
1016 | r0 = ntuple(i -> i == dims ? UnitRange(1, last(r[i]) - 1) : UnitRange(r[i]), N) | ||
1017 | r1 = ntuple(i -> i == dims ? UnitRange(2, last(r[i])) : UnitRange(r[i]), N) | ||
1018 | |||
1019 | return view(a, r1...) .- view(a, r0...) | ||
1020 | end | ||
1021 | function diff(r::AbstractRange{T}; dims::Integer=1) where {T} | ||
1022 | dims == 1 || throw(ArgumentError("dimension $dims out of range (1:1)")) | ||
1023 | return [@inbounds r[i+1] - r[i] for i in firstindex(r):lastindex(r)-1] | ||
1024 | end | ||
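
A sketch of the range method above, which returns the pairwise differences as a `Vector`:

```julia
diff(1:2:9)        # [2, 2, 2, 2]
diff(0.0:0.5:2.0)  # [0.5, 0.5, 0.5, 0.5]
```
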
1025 | |||
1026 | ### from abstractarray.jl | ||
1027 | |||
1028 | # In the common case where we have two views into the same parent, aliasing checks | ||
1029 | # are _much_ easier and more important to get right | ||
1030 | function mightalias(A::SubArray{T,<:Any,P}, B::SubArray{T,<:Any,P}) where {T,P} | ||
1031 | if !_parentsmatch(A.parent, B.parent) | ||
1032 | # We cannot do any better than the usual dataids check | ||
1033 | return !_isdisjoint(dataids(A), dataids(B)) | ||
1034 | end | ||
1035 | # Now we know that A.parent === B.parent. This means that the indices of A | ||
1036 | # and B are the same length and indexing into the same dimensions. We can | ||
1037 | # just walk through them and check for overlaps: O(ndims(A)). We must finally | ||
1038 | # ensure that the indices don't alias with either parent | ||
1039 | return _indicesmightoverlap(A.indices, B.indices) || | ||
1040 | !_isdisjoint(dataids(A.parent), _splatmap(dataids, B.indices)) || | ||
1041 | !_isdisjoint(dataids(B.parent), _splatmap(dataids, A.indices)) | ||
1042 | end | ||
1043 | _parentsmatch(A::AbstractArray, B::AbstractArray) = A === B | ||
1044 | # Two reshape(::Array)s of the same size aren't `===` because they have different headers | ||
1045 | _parentsmatch(A::Array, B::Array) = pointer(A) == pointer(B) && size(A) == size(B) | ||
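
A sketch of the specialized aliasing check above for two views of the same parent, using the internal `Base.mightalias` for illustration:

```julia
A = rand(10)
Base.mightalias(view(A, 1:3), view(A, 3:5))   # true: the index ranges overlap at 3
Base.mightalias(view(A, 1:3), view(A, 4:6))   # false: disjoint ranges of the same parent
```
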
1046 | |||
1047 | _indicesmightoverlap(A::Tuple{}, B::Tuple{}) = true | ||
1048 | _indicesmightoverlap(A::Tuple{}, B::Tuple) = error("malformed subarray") | ||
1049 | _indicesmightoverlap(A::Tuple, B::Tuple{}) = error("malformed subarray") | ||
1050 | # For ranges, it's relatively cheap to construct the intersection | ||
1051 | @inline function _indicesmightoverlap(A::Tuple{AbstractRange, Vararg{Any}}, B::Tuple{AbstractRange, Vararg{Any}}) | ||
1052 | !isempty(intersect(A[1], B[1])) ? _indicesmightoverlap(tail(A), tail(B)) : false | ||
1053 | end | ||
1054 | # But in the common AbstractUnitRange case, there's an even faster shortcut | ||
1055 | @inline function _indicesmightoverlap(A::Tuple{AbstractUnitRange, Vararg{Any}}, B::Tuple{AbstractUnitRange, Vararg{Any}}) | ||
1056 | max(first(A[1]),first(B[1])) <= min(last(A[1]),last(B[1])) ? _indicesmightoverlap(tail(A), tail(B)) : false | ||
1057 | end | ||
1058 | # And we can check scalars against each other and scalars against arrays quite easily | ||
1059 | @inline _indicesmightoverlap(A::Tuple{Real, Vararg{Any}}, B::Tuple{Real, Vararg{Any}}) = | ||
1060 | A[1] == B[1] ? _indicesmightoverlap(tail(A), tail(B)) : false | ||
1061 | @inline _indicesmightoverlap(A::Tuple{Real, Vararg{Any}}, B::Tuple{AbstractArray, Vararg{Any}}) = | ||
1062 | A[1] in B[1] ? _indicesmightoverlap(tail(A), tail(B)) : false | ||
1063 | @inline _indicesmightoverlap(A::Tuple{AbstractArray, Vararg{Any}}, B::Tuple{Real, Vararg{Any}}) = | ||
1064 | B[1] in A[1] ? _indicesmightoverlap(tail(A), tail(B)) : false | ||
1065 | # And small arrays are quick, too | ||
1066 | @inline function _indicesmightoverlap(A::Tuple{AbstractArray, Vararg{Any}}, B::Tuple{AbstractArray, Vararg{Any}}) | ||
1067 | if length(A[1]) == 1 | ||
1068 | return A[1][1] in B[1] ? _indicesmightoverlap(tail(A), tail(B)) : false | ||
1069 | elseif length(B[1]) == 1 | ||
1070 | return B[1][1] in A[1] ? _indicesmightoverlap(tail(A), tail(B)) : false | ||
1071 | else | ||
1072 | # But checking larger arrays requires O(m*n) and is too much work | ||
1073 | return true | ||
1074 | end | ||
1075 | end | ||
1076 | # And in general, checking the intersection is too much work | ||
1077 | _indicesmightoverlap(A::Tuple{Any, Vararg{Any}}, B::Tuple{Any, Vararg{Any}}) = true | ||
1078 | |||
1079 | """ | ||
1080 | fill!(A, x) | ||
1081 | |||
1082 | Fill array `A` with the value `x`. If `x` is an object reference, all elements will refer to | ||
1083 | the same object. `fill!(A, Foo())` will return `A` filled with the result of evaluating | ||
1084 | `Foo()` once. | ||
1085 | |||
1086 | # Examples | ||
1087 | ```jldoctest | ||
1088 | julia> A = zeros(2,3) | ||
1089 | 2×3 Matrix{Float64}: | ||
1090 | 0.0 0.0 0.0 | ||
1091 | 0.0 0.0 0.0 | ||
1092 | |||
1093 | julia> fill!(A, 2.) | ||
1094 | 2×3 Matrix{Float64}: | ||
1095 | 2.0 2.0 2.0 | ||
1096 | 2.0 2.0 2.0 | ||
1097 | |||
1098 | julia> a = [1, 1, 1]; A = fill!(Vector{Vector{Int}}(undef, 3), a); a[1] = 2; A | ||
1099 | 3-element Vector{Vector{Int64}}: | ||
1100 | [2, 1, 1] | ||
1101 | [2, 1, 1] | ||
1102 | [2, 1, 1] | ||
1103 | |||
1104 | julia> x = 0; f() = (global x += 1; x); fill!(Vector{Int}(undef, 3), f()) | ||
1105 | 3-element Vector{Int64}: | ||
1106 | 1 | ||
1107 | 1 | ||
1108 | 1 | ||
1109 | ``` | ||
1110 | """ | ||
1111 | function fill!(A::AbstractArray{T}, x) where T | ||
1112 | xT = convert(T, x) | ||
1113 | for I in eachindex(A) | ||
1114 | @inbounds A[I] = xT | ||
1115 | end | ||
1116 | A | ||
1117 | end | ||
1118 | |||
1119 | function copyto!(dest::AbstractArray{T1,N}, Rdest::CartesianIndices{N}, | ||
1120 | src::AbstractArray{T2,N}, Rsrc::CartesianIndices{N}) where {T1,T2,N} | ||
1121 | isempty(Rdest) && return dest | ||
1122 | if size(Rdest) != size(Rsrc) | ||
1123 | throw(ArgumentError("source and destination must have same size (got $(size(Rsrc)) and $(size(Rdest)))")) | ||
1124 | end | ||
1125 | checkbounds(dest, first(Rdest)) | ||
1126 | checkbounds(dest, last(Rdest)) | ||
1127 | checkbounds(src, first(Rsrc)) | ||
1128 | checkbounds(src, last(Rsrc)) | ||
1129 | src′ = unalias(dest, src) | ||
1130 | CRdest = CartesianIndices(Rdest) | ||
1131 | CRsrc = CartesianIndices(Rsrc) | ||
1132 | ΔI = first(CRdest) - first(CRsrc) | ||
1133 | if @generated | ||
1134 | quote | ||
1135 | @nloops $N i (n->CRsrc.indices[n]) begin | ||
1136 | @inbounds @nref($N,dest,n->Rdest.indices[n][i_n+ΔI[n]]) = @nref($N,src′,n->Rsrc.indices[n][i_n]) | ||
1137 | end | ||
1138 | end | ||
1139 | else | ||
1140 | for I in CRsrc | ||
1141 | @inbounds dest[Rdest[I + ΔI]] = src′[Rsrc[I]] | ||
1142 | end | ||
1143 | end | ||
1144 | dest | ||
1145 | end | ||
1146 | |||
1147 | """ | ||
1148 | copyto!(dest, Rdest::CartesianIndices, src, Rsrc::CartesianIndices) -> dest | ||
1149 | |||
1150 | Copy the block of `src` in the range of `Rsrc` to the block of `dest` | ||
1151 | in the range of `Rdest`. The sizes of the two regions must match. | ||
1152 | |||
1153 | # Examples | ||
1154 | ```jldoctest | ||
1155 | julia> A = zeros(5, 5); | ||
1156 | |||
1157 | julia> B = [1 2; 3 4]; | ||
1158 | |||
1159 | julia> Ainds = CartesianIndices((2:3, 2:3)); | ||
1160 | |||
1161 | julia> Binds = CartesianIndices(B); | ||
1162 | |||
1163 | julia> copyto!(A, Ainds, B, Binds) | ||
1164 | 5×5 Matrix{Float64}: | ||
1165 | 0.0 0.0 0.0 0.0 0.0 | ||
1166 | 0.0 1.0 2.0 0.0 0.0 | ||
1167 | 0.0 3.0 4.0 0.0 0.0 | ||
1168 | 0.0 0.0 0.0 0.0 0.0 | ||
1169 | 0.0 0.0 0.0 0.0 0.0 | ||
1170 | ``` | ||
1171 | """ | ||
1172 | copyto!(::AbstractArray, ::CartesianIndices, ::AbstractArray, ::CartesianIndices) | ||
1173 | |||
1174 | # circshift! | ||
1175 | circshift!(dest::AbstractArray, src, ::Tuple{}) = copyto!(dest, src) | ||
1176 | """ | ||
1177 | circshift!(dest, src, shifts) | ||
1178 | |||
1179 | Circularly shift, i.e. rotate, the data in `src`, storing the result in | ||
1180 | `dest`. `shifts` specifies the amount to shift in each dimension. | ||
1181 | |||
1182 | $(_DOCS_ALIASING_WARNING) | ||
1183 | |||
1184 | See also [`circshift`](@ref). | ||
1185 | """ | ||
1186 | @noinline function circshift!(dest::AbstractArray{T,N}, src, shiftamt::DimsInteger) where {T,N} | ||
1187 | dest === src && throw(ArgumentError("dest and src must be separate arrays")) | ||
1188 | inds = axes(src) | ||
1189 | axes(dest) == inds || throw(ArgumentError("indices of src and dest must match (got $inds and $(axes(dest)))")) | ||
1190 | isempty(src) && return dest | ||
1191 | _circshift!(dest, (), src, (), inds, fill_to_length(shiftamt, 0, Val(N))) | ||
1192 | end | ||
1193 | |||
1194 | circshift!(dest::AbstractArray, src, shiftamt) = | ||
1195 | circshift!(dest, src, map(Integer, (shiftamt...,))) | ||
1196 | |||
1197 | # For each dimension, we copy the first half of src to the second half | ||
1198 | # of dest, and the second half of src to the first half of dest. This | ||
1199 | # uses a recursive bifurcation strategy so that these splits can be | ||
1200 | # encoded by ranges, which means that we need only one call to `mod` | ||
1201 | # per dimension rather than one call per index. | ||
1202 | # `rdest` and `rsrc` are tuples-of-ranges that grow one dimension at a | ||
1203 | # time; when all the dimensions have been filled in, you call `copyto!` | ||
1204 | # for that block. In other words, in two dimensions schematically we | ||
1205 | # have the following call sequence (--> means a call): | ||
1206 | # circshift!(dest, src, shiftamt) --> | ||
1207 | # _circshift!(dest, src, ("first half of dim1",)) --> | ||
1208 | # _circshift!(dest, src, ("first half of dim1", "first half of dim2")) --> copyto! | ||
1209 | # _circshift!(dest, src, ("first half of dim1", "second half of dim2")) --> copyto! | ||
1210 | # _circshift!(dest, src, ("second half of dim1",)) --> | ||
1211 | # _circshift!(dest, src, ("second half of dim1", "first half of dim2")) --> copyto! | ||
1212 | # _circshift!(dest, src, ("second half of dim1", "second half of dim2")) --> copyto! | ||
1213 | @inline function _circshift!(dest, rdest, src, rsrc, | ||
1214 | inds::Tuple{AbstractUnitRange,Vararg{Any}}, | ||
1215 | shiftamt::Tuple{Integer,Vararg{Any}})::typeof(dest) | ||
1216 | ind1, d = inds[1], shiftamt[1] | ||
1217 | s = mod(d, length(ind1)) | ||
1218 | sf, sl = first(ind1)+s, last(ind1)-s | ||
1219 | r1, r2 = first(ind1):sf-1, sf:last(ind1) | ||
1220 | r3, r4 = first(ind1):sl, sl+1:last(ind1) | ||
1221 | tinds, tshiftamt = tail(inds), tail(shiftamt) | ||
1222 | _circshift!(dest, (rdest..., r1), src, (rsrc..., r4), tinds, tshiftamt) | ||
1223 | _circshift!(dest, (rdest..., r2), src, (rsrc..., r3), tinds, tshiftamt) | ||
1224 | end | ||
1225 | # At least one of inds, shiftamt is empty | ||
1226 | function _circshift!(dest, rdest, src, rsrc, inds, shiftamt) | ||
1227 | copyto!(dest, CartesianIndices(rdest), src, CartesianIndices(rsrc)) | ||
1228 | end | ||
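| # A small worked example (1-d, illustration only, not part of the source): for | ||
| # src = [1, 2, 3, 4, 5] and a shift of 2, the splits above are r1, r2 = 1:2, 3:5 | ||
| # and r3, r4 = 1:3, 4:5, so the two recursive calls perform the block copies | ||
| # dest[1:2] = src[4:5] and dest[3:5] = src[1:3], giving dest == [4, 5, 1, 2, 3] | ||
| # with a single call to `mod`. | ||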
1229 | |||
1230 | # circcopy! | ||
1231 | """ | ||
1232 | circcopy!(dest, src) | ||
1233 | |||
1234 | Copy `src` to `dest`, indexing each dimension modulo its length. | ||
1235 | `src` and `dest` must have the same size, but can be offset in | ||
1236 | their indices; any offset results in a (circular) wraparound. If the | ||
1237 | arrays have overlapping indices, then on the domain of the overlap | ||
1238 | `dest` agrees with `src`. | ||
1239 | |||
1240 | $(_DOCS_ALIASING_WARNING) | ||
1241 | |||
1242 | See also: [`circshift`](@ref). | ||
1243 | |||
1244 | # Examples | ||
1245 | ```julia-repl | ||
1246 | julia> src = reshape(Vector(1:16), (4,4)) | ||
1247 | 4×4 Array{Int64,2}: | ||
1248 | 1 5 9 13 | ||
1249 | 2 6 10 14 | ||
1250 | 3 7 11 15 | ||
1251 | 4 8 12 16 | ||
1252 | |||
1253 | julia> dest = OffsetArray{Int}(undef, (0:3,2:5)); | ||
1254 | |||
1255 | julia> circcopy!(dest, src) | ||
1256 | OffsetArrays.OffsetArray{Int64,2,Array{Int64,2}} with indices 0:3×2:5: | ||
1257 | 8 12 16 4 | ||
1258 | 5 9 13 1 | ||
1259 | 6 10 14 2 | ||
1260 | 7 11 15 3 | ||
1261 | |||
1262 | julia> dest[1:3,2:4] == src[1:3,2:4] | ||
1263 | true | ||
1264 | ``` | ||
1265 | """ | ||
1266 | function circcopy!(dest, src) | ||
1267 | dest === src && throw(ArgumentError("dest and src must be separate arrays")) | ||
1268 | indssrc, indsdest = axes(src), axes(dest) | ||
1269 | if (szsrc = map(length, indssrc)) != (szdest = map(length, indsdest)) | ||
1270 | throw(DimensionMismatch("src and dest must have the same sizes (got $szsrc and $szdest)")) | ||
1271 | end | ||
1272 | shift = map((isrc, idest)->first(isrc)-first(idest), indssrc, indsdest) | ||
1273 | all(x->x==0, shift) && return copyto!(dest, src) | ||
1274 | _circcopy!(dest, (), indsdest, src, (), indssrc) | ||
1275 | end | ||
1276 | |||
1277 | # This uses the same strategy described above for _circshift! | ||
1278 | @inline function _circcopy!(dest, rdest, indsdest::Tuple{AbstractUnitRange,Vararg{Any}}, | ||
1279 | src, rsrc, indssrc::Tuple{AbstractUnitRange,Vararg{Any}})::typeof(dest) | ||
1280 | indd1, inds1 = indsdest[1], indssrc[1] | ||
1281 | l = length(indd1) | ||
1282 | s = mod(first(inds1)-first(indd1), l) | ||
1283 | sdf = first(indd1)+s | ||
1284 | rd1, rd2 = first(indd1):sdf-1, sdf:last(indd1) | ||
1285 | ssf = last(inds1)-s | ||
1286 | rs1, rs2 = first(inds1):ssf, ssf+1:last(inds1) | ||
1287 | tindsd, tindss = tail(indsdest), tail(indssrc) | ||
1288 | _circcopy!(dest, (rdest..., rd1), tindsd, src, (rsrc..., rs2), tindss) | ||
1289 | _circcopy!(dest, (rdest..., rd2), tindsd, src, (rsrc..., rs1), tindss) | ||
1290 | end | ||
1291 | |||
1292 | # At least one of indsdest, indssrc is empty (and both should be, since we've checked) | ||
1293 | function _circcopy!(dest, rdest, indsdest, src, rsrc, indssrc) | ||
1294 | copyto!(dest, CartesianIndices(rdest), src, CartesianIndices(rsrc)) | ||
1295 | end | ||
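| # A small worked example (1-d, illustration only): copying a src with axis 1:4 into | ||
| # a dest with axis 0:3 gives s = mod(1 - 0, 4) = 1, hence rd1, rd2 = 0:0, 1:3 and | ||
| # rs1, rs2 = 1:3, 4:4; the two recursive calls copy src[4] into dest[0] and | ||
| # src[1:3] into dest[1:3], which is exactly indexing modulo the length 4. | ||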
1296 | |||
1297 | ### BitArrays | ||
1298 | |||
1299 | ## getindex | ||
1300 | |||
1301 | # contiguous multidimensional indexing: if the first dimension is a range, | ||
1302 | # we can get some performance from using copy_chunks! | ||
1303 | @inline function _unsafe_getindex!(X::BitArray, B::BitArray, I0::Union{AbstractUnitRange{Int},Slice}) | ||
1304 | copy_chunks!(X.chunks, 1, B.chunks, indexoffset(I0)+1, length(I0)) | ||
1305 | return X | ||
1306 | end | ||
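| # For example (illustration only), a call like `B[3:70]` on a `BitVector` should end | ||
| # up here and be copied 64 bits at a time via `copy_chunks!` rather than bit by bit. | ||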
1307 | |||
1308 | # Optimization where the inner dimension is contiguous improves perf dramatically | ||
1309 | @generated function _unsafe_getindex!(X::BitArray, B::BitArray, | ||
1310 | I0::Union{Slice,UnitRange{Int}}, I::Union{Int,AbstractUnitRange{Int},Slice}...) | ||
1311 | N = length(I) | ||
1312 | quote | ||
1313 | $(Expr(:meta, :inline)) | ||
1314 | @nexprs $N d->(I_d = I[d]) | ||
1315 | |||
1316 | idxlens = @ncall $N index_lengths I0 I | ||
1317 | |||
1318 | f0 = indexoffset(I0)+1 | ||
1319 | l0 = idxlens[1] | ||
1320 | |||
1321 | gap_lst_1 = 0 | ||
1322 | @nexprs $N d->(gap_lst_{d+1} = idxlens[d+1]) | ||
1323 | stride = 1 | ||
1324 | ind = f0 | ||
1325 | @nexprs $N d->begin | ||
1326 | stride *= size(B, d) | ||
1327 | stride_lst_d = stride | ||
1328 | ind += stride * indexoffset(I_d) | ||
1329 | gap_lst_{d+1} *= stride | ||
1330 | end | ||
1331 | |||
1332 | storeind = 1 | ||
1333 | Xc, Bc = X.chunks, B.chunks | ||
1334 | @nloops($N, i, d->(1:idxlens[d+1]), | ||
1335 | d->nothing, # PRE | ||
1336 | d->(ind += stride_lst_d - gap_lst_d), # POST | ||
1337 | begin # BODY | ||
1338 | copy_chunks!(Xc, storeind, Bc, ind, l0) | ||
1339 | storeind += l0 | ||
1340 | end) | ||
1341 | return X | ||
1342 | end | ||
1343 | end | ||
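| # For example (illustration only), an indexing expression like `B[:, 1:3, 2]` reaches | ||
| # the method above, and each contiguous run along the first dimension is moved with a | ||
| # single `copy_chunks!` call. | ||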
1344 | |||
1345 | # in the general multidimensional non-scalar case, we can do about 10% better | ||
1346 | # in most cases by manually hoisting the bitarray chunks access out of the loop | ||
1347 | # (This should really be handled by the compiler or with an immutable BitArray) | ||
1348 | @generated function _unsafe_getindex!(X::BitArray, B::BitArray, I::Union{Int,AbstractArray{Int}}...) | ||
1349 | N = length(I) | ||
1350 | quote | ||
1351 | $(Expr(:meta, :inline)) | ||
1352 | stride_1 = 1 | ||
1353 | @nexprs $N d->(stride_{d+1} = stride_d*size(B, d)) | ||
1354 | $(Symbol(:offset_, N)) = 1 | ||
1355 | ind = 0 | ||
1356 | Xc, Bc = X.chunks, B.chunks | ||
1357 | @nloops $N i d->I[d] d->(@inbounds offset_{d-1} = offset_d + (i_d-1)*stride_d) begin | ||
1358 | ind += 1 | ||
1359 | unsafe_bitsetindex!(Xc, unsafe_bitgetindex(Bc, offset_0), ind) | ||
1360 | end | ||
1361 | return X | ||
1362 | end | ||
1363 | end | ||
1364 | |||
1365 | ## setindex! | ||
1366 | |||
1367 | function copy_to_bitarray_chunks!(Bc::Vector{UInt64}, pos_d::Int, C::StridedArray, pos_s::Int, numbits::Int) | ||
1368 | bind = pos_d | ||
1369 | cind = pos_s | ||
1370 | lastind = pos_d + numbits - 1 | ||
1371 | @inbounds while bind ≤ lastind | ||
1372 | unsafe_bitsetindex!(Bc, Bool(C[cind]), bind) | ||
1373 | bind += 1 | ||
1374 | cind += 1 | ||
1375 | end | ||
1376 | end | ||
1377 | |||
1378 | # Note: the next two functions rely on the following definition of the conversion to Bool: | ||
1379 | # convert(::Type{Bool}, x::Real) = x==0 ? false : x==1 ? true : throw(InexactError(...)) | ||
1380 | # they're used to preemptively check in bulk when possible, which is much faster. | ||
1381 | # Also, the functions can be overloaded for custom types T<:Real: | ||
1382 | # a) in the unlikely eventuality that they use different logic for Bool conversion | ||
1383 | # b) to skip the check if not necessary | ||
1384 | @inline try_bool_conversion(x::Real) = | ||
1385 | x == 0 || x == 1 || throw(InexactError(:try_bool_conversion, Bool, x)) | ||
1386 | @inline unchecked_bool_convert(x::Real) = x == 1 | ||
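| # A hypothetical sketch (not part of Base) of such an overload: a wrapper type whose | ||
| # values are known to be 0 or 1 could make the bulk check a no-op and convert directly: | ||
| #     struct ZeroOne <: Real | ||
| #         x::Bool | ||
| #     end | ||
| #     Base.try_bool_conversion(v::ZeroOne) = nothing    # (b) nothing to validate | ||
| #     Base.unchecked_bool_convert(v::ZeroOne) = v.x     # (a) custom conversion logic | ||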
1387 | |||
1388 | function copy_to_bitarray_chunks!(Bc::Vector{UInt64}, pos_d::Int, C::StridedArray{<:Real}, pos_s::Int, numbits::Int) | ||
1389 | @inbounds for i = (1:numbits) .+ (pos_s - 1) | ||
1390 | try_bool_conversion(C[i]) | ||
1391 | end | ||
1392 | |||
1393 | kd0, ld0 = get_chunks_id(pos_d) | ||
1394 | kd1, ld1 = get_chunks_id(pos_d + numbits - 1) | ||
1395 | |||
1396 | delta_kd = kd1 - kd0 | ||
1397 | |||
1398 | u = _msk64 | ||
1399 | if delta_kd == 0 | ||
1400 | msk_d0 = msk_d1 = ~(u << ld0) | (u << (ld1+1)) | ||
1401 | lt0 = ld1 | ||
1402 | else | ||
1403 | msk_d0 = ~(u << ld0) | ||
1404 | msk_d1 = (u << (ld1+1)) | ||
1405 | lt0 = 63 | ||
1406 | end | ||
1407 | |||
1408 | bind = kd0 | ||
1409 | ind = pos_s | ||
1410 | @inbounds if ld0 > 0 | ||
1411 | c = UInt64(0) | ||
1412 | for j = ld0:lt0 | ||
1413 | c |= (UInt64(unchecked_bool_convert(C[ind])) << j) | ||
1414 | ind += 1 | ||
1415 | end | ||
1416 | Bc[kd0] = (Bc[kd0] & msk_d0) | (c & ~msk_d0) | ||
1417 | bind += 1 | ||
1418 | end | ||
1419 | |||
1420 | nc = _div64(numbits - ind + pos_s) | ||
1421 | @inbounds for i = 1:nc | ||
1422 | c = UInt64(0) | ||
1423 | for j = 0:63 | ||
1424 | c |= (UInt64(unchecked_bool_convert(C[ind])) << j) | ||
1425 | ind += 1 | ||
1426 | end | ||
1427 | Bc[bind] = c | ||
1428 | bind += 1 | ||
1429 | end | ||
1430 | |||
1431 | @inbounds if bind ≤ kd1 | ||
1432 | @assert bind == kd1 | ||
1433 | c = UInt64(0) | ||
1434 | for j = 0:ld1 | ||
1435 | c |= (UInt64(unchecked_bool_convert(C[ind])) << j) | ||
1436 | ind += 1 | ||
1437 | end | ||
1438 | Bc[kd1] = (Bc[kd1] & msk_d1) | (c & ~msk_d1) | ||
1439 | end | ||
1440 | end | ||
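| # Worked example of the masking above (illustration only): writing numbits = 10 bits | ||
| # starting at destination bit position pos_d = 60 gives (kd0, ld0) = (1, 59) and | ||
| # (kd1, ld1) = (2, 4), so msk_d0 = ~(u << 59) preserves bits 0:58 of Bc[1] while bits | ||
| # 59:63 receive new data, and msk_d1 = u << 5 preserves bits 5:63 of Bc[2] while bits | ||
| # 0:4 receive new data; in total 5 + 5 = 10 bits are written. | ||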
1441 | |||
1442 | # contiguous multidimensional indexing: if the first dimension is a range, | ||
1443 | # we can get some performance from using copy_chunks! | ||
1444 | |||
1445 | @inline function setindex!(B::BitArray, X::Union{StridedArray,BitArray}, J0::Union{Colon,AbstractUnitRange{Int}}) | ||
1446 | I0 = to_indices(B, (J0,))[1] | ||
1447 | @boundscheck checkbounds(B, I0) | ||
1448 | l0 = length(I0) | ||
1449 | setindex_shape_check(X, l0) | ||
1450 | l0 == 0 && return B | ||
1451 | f0 = indexoffset(I0)+1 | ||
1452 | copy_to_bitarray_chunks!(B.chunks, f0, X, 1, l0) | ||
1453 | return B | ||
1454 | end | ||
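| # For example (illustration only), an assignment like `B[33:96] = X` with a 64-element | ||
| # `X` is written through `copy_to_bitarray_chunks!` above rather than bit by bit. | ||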
1455 | |||
1456 | @inline function setindex!(B::BitArray, X::Union{StridedArray,BitArray}, | ||
1457 | I0::Union{Colon,AbstractUnitRange{Int}}, I::Union{Int,AbstractUnitRange{Int},Colon}...) | ||
1458 | J = to_indices(B, (I0, I...)) | ||
1459 | @boundscheck checkbounds(B, J...) | ||
1460 | _unsafe_setindex!(B, X, J...) | ||
1461 | end | ||
1462 | @generated function _unsafe_setindex!(B::BitArray, X::Union{StridedArray,BitArray}, | ||
1463 | I0::Union{Slice,AbstractUnitRange{Int}}, I::Union{Int,AbstractUnitRange{Int},Slice}...) | ||
1464 | N = length(I) | ||
1465 | quote | ||
1466 | idxlens = @ncall $N index_lengths I0 d->I[d] | ||
1467 | @ncall $N setindex_shape_check X idxlens[1] d->idxlens[d+1] | ||
1468 | isempty(X) && return B | ||
1469 | f0 = indexoffset(I0)+1 | ||
1470 | l0 = idxlens[1] | ||
1471 | |||
1472 | gap_lst_1 = 0 | ||
1473 | @nexprs $N d->(gap_lst_{d+1} = idxlens[d+1]) | ||
1474 | stride = 1 | ||
1475 | ind = f0 | ||
1476 | @nexprs $N d->begin | ||
1477 | stride *= size(B, d) | ||
1478 | stride_lst_d = stride | ||
1479 | ind += stride * indexoffset(I[d]) | ||
1480 | gap_lst_{d+1} *= stride | ||
1481 | end | ||
1482 | |||
1483 | refind = 1 | ||
1484 | Bc = B.chunks | ||
1485 | @nloops($N, i, d->I[d], | ||
1486 | d->nothing, # PRE | ||
1487 | d->(ind += stride_lst_d - gap_lst_d), # POST | ||
1488 | begin # BODY | ||
1489 | copy_to_bitarray_chunks!(Bc, ind, X, refind, l0) | ||
1490 | refind += l0 | ||
1491 | end) | ||
1492 | |||
1493 | return B | ||
1494 | end | ||
1495 | end | ||
1496 | |||
1497 | @propagate_inbounds function setindex!(B::BitArray, X::AbstractArray, | ||
1498 | I0::Union{Colon,AbstractUnitRange{Int}}, I::Union{Int,AbstractUnitRange{Int},Colon}...) | ||
1499 | _setindex!(IndexStyle(B), B, X, to_indices(B, (I0, I...))...) | ||
1500 | end | ||
1501 | |||
1502 | ## fill! contiguous views of BitArrays with a single value | ||
1503 | function fill!(V::SubArray{Bool, <:Any, <:BitArray, <:Tuple{AbstractUnitRange{Int}}}, x) | ||
1504 | B = V.parent | ||
1505 | I0 = V.indices[1] | ||
1506 | l0 = length(I0) | ||
1507 | l0 == 0 && return V | ||
1508 | fill_chunks!(B.chunks, Bool(x), first(I0), l0) | ||
1509 | return V | ||
1510 | end | ||
1511 | |||
1512 | fill!(V::SubArray{Bool, <:Any, <:BitArray, <:Tuple{AbstractUnitRange{Int}, Vararg{Union{Int,AbstractUnitRange{Int}}}}}, x) = | ||
1513 | _unsafe_fill_indices!(V.parent, x, V.indices...) | ||
1514 | |||
1515 | @generated function _unsafe_fill_indices!(B::BitArray, x, | ||
1516 | I0::AbstractUnitRange{Int}, I::Union{Int,AbstractUnitRange{Int}}...) | ||
1517 | N = length(I) | ||
1518 | quote | ||
1519 | y = Bool(x) | ||
1520 | idxlens = @ncall $N index_lengths I0 d->I[d] | ||
1521 | |||
1522 | f0 = indexoffset(I0)+1 | ||
1523 | l0 = idxlens[1] | ||
1524 | l0 == 0 && return B | ||
1525 | @nexprs $N d->(isempty(I[d]) && return B) | ||
1526 | |||
1527 | gap_lst_1 = 0 | ||
1528 | @nexprs $N d->(gap_lst_{d+1} = idxlens[d+1]) | ||
1529 | stride = 1 | ||
1530 | ind = f0 | ||
1531 | @nexprs $N d->begin | ||
1532 | stride *= size(B, d) | ||
1533 | stride_lst_d = stride | ||
1534 | ind += stride * indexoffset(I[d]) | ||
1535 | gap_lst_{d+1} *= stride | ||
1536 | end | ||
1537 | |||
1538 | @nloops($N, i, d->I[d], | ||
1539 | d->nothing, # PRE | ||
1540 | d->(ind += stride_lst_d - gap_lst_d), # POST | ||
1541 | fill_chunks!(B.chunks, y, ind, l0) # BODY | ||
1542 | ) | ||
1543 | |||
1544 | return B | ||
1545 | end | ||
1546 | end | ||
1547 | |||
1548 | ## isassigned | ||
1549 | |||
1550 | @generated function isassigned(B::BitArray, I_0::Int, I::Int...) | ||
1551 | N = length(I) | ||
1552 | quote | ||
1553 | @nexprs $N d->(I_d = I[d]) | ||
1554 | stride = 1 | ||
1555 | index = I_0 | ||
1556 | @nexprs $N d->begin | ||
1557 | l = size(B,d) | ||
1558 | stride *= l | ||
1559 | @boundscheck 1 <= I_{d-1} <= l || return false | ||
1560 | index += (I_d - 1) * stride | ||
1561 | end | ||
1562 | return isassigned(B, index) | ||
1563 | end | ||
1564 | end | ||
1565 | |||
1566 | isassigned(a::AbstractArray, i::CartesianIndex) = isassigned(a, Tuple(i)...) | ||
1567 | function isassigned(A::AbstractArray, i::Union{Integer, CartesianIndex}...) | ||
1568 | isa(i, Tuple{Vararg{Int}}) || return isassigned(A, CartesianIndex(to_indices(A, i))) | ||
1569 | @boundscheck checkbounds(Bool, A, i...) || return false | ||
1570 | S = IndexStyle(A) | ||
1571 | ninds = length(i) | ||
1572 | if (isa(S, IndexLinear) && ninds != 1) | ||
1573 | return @inbounds isassigned(A, _to_linear_index(A, i...)) | ||
1574 | elseif (!isa(S, IndexLinear) && ninds != ndims(A)) | ||
1575 | return @inbounds isassigned(A, _to_subscript_indices(A, i...)...) | ||
1576 | else | ||
1577 | try | ||
1578 | A[i...] | ||
1579 | true | ||
1580 | catch e | ||
1581 | if isa(e, BoundsError) || isa(e, UndefRefError) | ||
1582 | return false | ||
1583 | else | ||
1584 | rethrow() | ||
1585 | end | ||
1586 | end | ||
1587 | end | ||
1588 | end | ||
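| # For example (illustration only), for a 3-d array `A`, `isassigned(A, 2, CartesianIndex(1, 1))` | ||
| # first normalizes the mixed indices via `to_indices`, and an out-of-bounds index | ||
| # returns `false` rather than throwing. | ||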
1589 | |||
1590 | ## permutedims | ||
1591 | |||
1592 | ## Permute array dims ## | ||
1593 | |||
1594 | function permutedims(B::StridedArray, perm) | ||
1595 | dimsB = size(B) | ||
1596 | ndimsB = length(dimsB) | ||
1597 | (ndimsB == length(perm) && isperm(perm)) || throw(ArgumentError("no valid permutation of dimensions")) | ||
1598 | dimsP = ntuple(i->dimsB[perm[i]], ndimsB)::typeof(dimsB) | ||
1599 | P = similar(B, dimsP) | ||
1600 | permutedims!(P, B, perm) | ||
1601 | end | ||
1602 | |||
1603 | function checkdims_perm(P::AbstractArray{TP,N}, B::AbstractArray{TB,N}, perm) where {TP,TB,N} | ||
1604 | indsB = axes(B) | ||
1605 | length(perm) == N || throw(ArgumentError("expected permutation of size $N, but length(perm)=$(length(perm))")) | ||
1606 | isperm(perm) || throw(ArgumentError("input is not a permutation")) | ||
1607 | indsP = axes(P) | ||
1608 | for i = 1:length(perm) | ||
1609 | indsP[i] == indsB[perm[i]] || throw(DimensionMismatch("destination tensor of incorrect size")) | ||
1610 | end | ||
1611 | nothing | ||
1612 | end | ||
1613 | |||
1614 | for (V, PT, BT) in Any[((:N,), BitArray, BitArray), ((:T,:N), Array, StridedArray)] | ||
1615 | @eval @generated function permutedims!(P::$PT{$(V...)}, B::$BT{$(V...)}, perm) where $(V...) | ||
1616 | quote | ||
1617 | checkdims_perm(P, B, perm) | ||
1618 | |||
1619 | # calculates all the strides | ||
1620 | native_strides = size_to_strides(1, size(B)...) | ||
1621 | strides_1 = 0 | ||
1622 | @nexprs $N d->(strides_{d+1} = native_strides[perm[d]]) | ||
1623 | |||
1624 | # Creates the offset because indexing starts at 1 | ||
1625 | offset = 1 - sum(@ntuple $N d->strides_{d+1}) | ||
1626 | |||
1627 | sumc = 0 | ||
1628 | ind = 1 | ||
1629 | @nloops($N, i, P, | ||
1630 | d->(sumc += i_d*strides_{d+1}), # PRE | ||
1631 | d->(sumc -= i_d*strides_{d+1}), # POST | ||
1632 | begin # BODY | ||
1633 | @inbounds P[ind] = B[sumc+offset] | ||
1634 | ind += 1 | ||
1635 | end) | ||
1636 | |||
1637 | return P | ||
1638 | end | ||
1639 | end | ||
1640 | end | ||
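| # A small worked example of the stride arithmetic above (illustration only): for a | ||
| # 2×3 matrix B and perm = (2, 1), native_strides = (1, 2), so strides_2 = 2, | ||
| # strides_3 = 1, and offset = 1 - (2 + 1) = -2; the destination position (2, 1) then | ||
| # reads B[2*2 + 1*1 - 2] = B[3] = B[1, 2], i.e. exactly the transposed entry. | ||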
1641 | |||
1642 | ## unique across dim | ||
1643 | |||
1644 | # TODO: this doesn't fit into the new hashing scheme in any obvious way | ||
1645 | |||
1646 | struct Prehashed | ||
1647 | hash::UInt | ||
1648 | end | ||
1649 | hash(x::Prehashed) = x.hash | ||
1650 | |||
1651 | """ | ||
1652 | unique(A::AbstractArray; dims::Int) | ||
1653 | |||
1654 | Return the unique slices of `A` along dimension `dims`. | ||
1655 | |||
1656 | # Examples | ||
1657 | ```jldoctest | ||
1658 | julia> A = map(isodd, reshape(Vector(1:8), (2,2,2))) | ||
1659 | 2×2×2 Array{Bool, 3}: | ||
1660 | [:, :, 1] = | ||
1661 | 1 1 | ||
1662 | 0 0 | ||
1663 | |||
1664 | [:, :, 2] = | ||
1665 | 1 1 | ||
1666 | 0 0 | ||
1667 | |||
1668 | julia> unique(A) | ||
1669 | 2-element Vector{Bool}: | ||
1670 | 1 | ||
1671 | 0 | ||
1672 | |||
1673 | julia> unique(A, dims=2) | ||
1674 | 2×1×2 Array{Bool, 3}: | ||
1675 | [:, :, 1] = | ||
1676 | 1 | ||
1677 | 0 | ||
1678 | |||
1679 | [:, :, 2] = | ||
1680 | 1 | ||
1681 | 0 | ||
1682 | |||
1683 | julia> unique(A, dims=3) | ||
1684 | 2×2×1 Array{Bool, 3}: | ||
1685 | [:, :, 1] = | ||
1686 | 1 1 | ||
1687 | 0 0 | ||
1688 | ``` | ||
1689 | """ | ||
1690 | unique(A::AbstractArray; dims::Union{Colon,Integer} = :) = _unique_dims(A, dims) | ||
1691 | |||
1692 | _unique_dims(A::AbstractArray, dims::Colon) = invoke(unique, Tuple{Any}, A) | ||
1693 | |||
1694 | @generated function _unique_dims(A::AbstractArray{T,N}, dim::Integer) where {T,N} | ||
1695 | quote | ||
1696 | 1 <= dim <= $N || return copy(A) | ||
1697 | hashes = zeros(UInt, axes(A, dim)) | ||
1698 | |||
1699 | # Compute hash for each row | ||
1700 | k = 0 | ||
1701 | @nloops $N i A d->(if d == dim; k = i_d; end) begin | ||
1702 | @inbounds hashes[k] = hash(hashes[k], hash((@nref $N A i))) | ||
1703 | end | ||
1704 | |||
1705 | # Collect index of first row for each hash | ||
1706 | uniquerow = similar(Array{Int}, axes(A, dim)) | ||
1707 | firstrow = Dict{Prehashed,Int}() | ||
1708 | for k = axes(A, dim) | ||
1709 | uniquerow[k] = get!(firstrow, Prehashed(hashes[k]), k) | ||
1710 | end | ||
1711 | uniquerows = collect(values(firstrow)) | ||
1712 | |||
1713 | # Check for collisions | ||
1714 | collided = falses(axes(A, dim)) | ||
1715 | @inbounds begin | ||
1716 | @nloops $N i A d->(if d == dim | ||
1717 | k = i_d | ||
1718 | j_d = uniquerow[k] | ||
1719 | else | ||
1720 | j_d = i_d | ||
1721 | end) begin | ||
1722 | if !isequal((@nref $N A j), (@nref $N A i)) | ||
1723 | collided[k] = true | ||
1724 | end | ||
1725 | end | ||
1726 | end | ||
1727 | |||
1728 | if any(collided) | ||
1729 | nowcollided = similar(BitArray, axes(A, dim)) | ||
1730 | while any(collided) | ||
1731 | # Collect index of first row for each collided hash | ||
1732 | empty!(firstrow) | ||
1733 | for j = axes(A, dim) | ||
1734 | collided[j] || continue | ||
1735 | uniquerow[j] = get!(firstrow, Prehashed(hashes[j]), j) | ||
1736 | end | ||
1737 | for v in values(firstrow) | ||
1738 | push!(uniquerows, v) | ||
1739 | end | ||
1740 | |||
1741 | # Check for collisions | ||
1742 | fill!(nowcollided, false) | ||
1743 | @nloops $N i A d->begin | ||
1744 | if d == dim | ||
1745 | k = i_d | ||
1746 | j_d = uniquerow[k] | ||
1747 | (!collided[k] || j_d == k) && continue | ||
1748 | else | ||
1749 | j_d = i_d | ||
1750 | end | ||
1751 | end begin | ||
1752 | if !isequal((@nref $N A j), (@nref $N A i)) | ||
1753 | nowcollided[k] = true | ||
1754 | end | ||
1755 | end | ||
1756 | (collided, nowcollided) = (nowcollided, collided) | ||
1757 | end | ||
1758 | end | ||
1759 | |||
1760 | @nref $N A d->d == dim ? sort!(uniquerows) : (axes(A, d)) | ||
1761 | end | ||
1762 | end | ||
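| # A rough non-@generated sketch of the idea above for the 2-d, dims = 1 case | ||
| # (illustration only; the real method handles arbitrary N and resolves hash | ||
| # collisions by re-checking rows that share a hash): | ||
| #     function unique_rows(A::AbstractMatrix) | ||
| #         seen = Dict{Vector,Int}() | ||
| #         keep = Int[] | ||
| #         for i in axes(A, 1) | ||
| #             row = A[i, :] | ||
| #             haskey(seen, row) || (seen[row] = i; push!(keep, i)) | ||
| #         end | ||
| #         return A[sort!(keep), :] | ||
| #     end | ||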
1763 | |||
1764 | # Show for pairs() with Cartesian indices. Needs to be here rather than show.jl for bootstrap order | ||
1765 | function Base.showarg(io::IO, r::Iterators.Pairs{<:Integer, <:Any, <:Any, T}, toplevel) where T <: Union{AbstractVector, Tuple} | ||
1766 | print(io, "pairs(::$T)") | ||
1767 | end | ||
1768 | function Base.showarg(io::IO, r::Iterators.Pairs{<:CartesianIndex, <:Any, <:Any, T}, toplevel) where T <: AbstractArray | ||
1769 | print(io, "pairs(::$T)") | ||
1770 | end | ||
1771 | |||
1772 | function Base.showarg(io::IO, r::Iterators.Pairs{<:CartesianIndex, <:Any, <:Any, T}, toplevel) where T<:AbstractVector | ||
1773 | print(io, "pairs(IndexCartesian(), ::$T)") | ||
1774 | end | ||
1775 | |||
1776 | ## sortslices | ||
1777 | |||
1778 | """ | ||
1779 | sortslices(A; dims, alg::Algorithm=DEFAULT_UNSTABLE, lt=isless, by=identity, rev::Bool=false, order::Ordering=Forward) | ||
1780 | |||
1781 | Sort slices of an array `A`. The required keyword argument `dims` must | ||
1782 | be either an integer or a tuple of integers. It specifies the | ||
1783 | dimension(s) over which the slices are sorted. | ||
1784 | |||
1785 | E.g., if `A` is a matrix, `dims=1` will sort rows, `dims=2` will sort columns. | ||
1786 | Note that the default comparison function on one-dimensional slices sorts | ||
1787 | lexicographically. | ||
1788 | |||
1789 | For the remaining keyword arguments, see the documentation of [`sort!`](@ref). | ||
1790 | |||
1791 | # Examples | ||
1792 | ```jldoctest | ||
1793 | julia> sortslices([7 3 5; -1 6 4; 9 -2 8], dims=1) # Sort rows | ||
1794 | 3×3 Matrix{Int64}: | ||
1795 | -1 6 4 | ||
1796 | 7 3 5 | ||
1797 | 9 -2 8 | ||
1798 | |||
1799 | julia> sortslices([7 3 5; -1 6 4; 9 -2 8], dims=1, lt=(x,y)->isless(x[2],y[2])) | ||
1800 | 3×3 Matrix{Int64}: | ||
1801 | 9 -2 8 | ||
1802 | 7 3 5 | ||
1803 | -1 6 4 | ||
1804 | |||
1805 | julia> sortslices([7 3 5; -1 6 4; 9 -2 8], dims=1, rev=true) | ||
1806 | 3×3 Matrix{Int64}: | ||
1807 | 9 -2 8 | ||
1808 | 7 3 5 | ||
1809 | -1 6 4 | ||
1810 | |||
1811 | julia> sortslices([7 3 5; 6 -1 -4; 9 -2 8], dims=2) # Sort columns | ||
1812 | 3×3 Matrix{Int64}: | ||
1813 | 3 5 7 | ||
1814 | -1 -4 6 | ||
1815 | -2 8 9 | ||
1816 | |||
1817 | julia> sortslices([7 3 5; 6 -1 -4; 9 -2 8], dims=2, alg=InsertionSort, lt=(x,y)->isless(x[2],y[2])) | ||
1818 | 3×3 Matrix{Int64}: | ||
1819 | 5 3 7 | ||
1820 | -4 -1 6 | ||
1821 | 8 -2 9 | ||
1822 | |||
1823 | julia> sortslices([7 3 5; 6 -1 -4; 9 -2 8], dims=2, rev=true) | ||
1824 | 3×3 Matrix{Int64}: | ||
1825 | 7 5 3 | ||
1826 | 6 -4 -1 | ||
1827 | 9 8 -2 | ||
1828 | ``` | ||
1829 | |||
1830 | # Higher dimensions | ||
1831 | |||
1832 | `sortslices` extends naturally to higher dimensions. E.g., if `A` is a | ||
1833 | 2x2x2 array, `sortslices(A, dims=3)` will sort slices within the 3rd dimension, | ||
1834 | passing the 2x2 slices `A[:, :, 1]` and `A[:, :, 2]` to the comparison function. | ||
1835 | Note that while there is no default order on higher-dimensional slices, you may | ||
1836 | use the `by` or `lt` keyword argument to specify such an order. | ||
1837 | |||
1838 | If `dims` is a tuple, the order of the dimensions in `dims` is | ||
1839 | relevant and specifies the linear order of the slices. E.g., if `A` is | ||
1840 | three-dimensional and `dims` is `(1, 2)`, the orderings of the first two dimensions | ||
1841 | are re-arranged such that the slices (of the remaining third dimension) are sorted. | ||
1842 | If `dims` is `(2, 1)` instead, the same slices are taken, | ||
1843 | but the result order is row-major. | ||
1844 | |||
1845 | # Higher dimensional examples | ||
1846 | ``` | ||
1847 | julia> A = permutedims(reshape([4 3; 2 1; 'A' 'B'; 'C' 'D'], (2, 2, 2)), (1, 3, 2)) | ||
1848 | 2×2×2 Array{Any, 3}: | ||
1849 | [:, :, 1] = | ||
1850 | 4 3 | ||
1851 | 2 1 | ||
1852 | |||
1853 | [:, :, 2] = | ||
1854 | 'A' 'B' | ||
1855 | 'C' 'D' | ||
1856 | |||
1857 | julia> sortslices(A, dims=(1,2)) | ||
1858 | 2×2×2 Array{Any, 3}: | ||
1859 | [:, :, 1] = | ||
1860 | 1 3 | ||
1861 | 2 4 | ||
1862 | |||
1863 | [:, :, 2] = | ||
1864 | 'D' 'B' | ||
1865 | 'C' 'A' | ||
1866 | |||
1867 | julia> sortslices(A, dims=(2,1)) | ||
1868 | 2×2×2 Array{Any, 3}: | ||
1869 | [:, :, 1] = | ||
1870 | 1 2 | ||
1871 | 3 4 | ||
1872 | |||
1873 | [:, :, 2] = | ||
1874 | 'D' 'C' | ||
1875 | 'B' 'A' | ||
1876 | |||
1877 | julia> sortslices(reshape([5; 4; 3; 2; 1], (1,1,5)), dims=3, by=x->x[1,1]) | ||
1878 | 1×1×5 Array{Int64, 3}: | ||
1879 | [:, :, 1] = | ||
1880 | 1 | ||
1881 | |||
1882 | [:, :, 2] = | ||
1883 | 2 | ||
1884 | |||
1885 | [:, :, 3] = | ||
1886 | 3 | ||
1887 | |||
1888 | [:, :, 4] = | ||
1889 | 4 | ||
1890 | |||
1891 | [:, :, 5] = | ||
1892 | 5 | ||
1893 | ``` | ||
1894 | """ | ||
1895 | function sortslices(A::AbstractArray; dims::Union{Integer, Tuple{Vararg{Integer}}}, kws...) | ||
1896 | _sortslices(A, Val{dims}(); kws...) | ||
1897 | end | ||
1898 | |||
1899 | # Works around inference's lack of ability to recognize partial constness | ||
1900 | struct DimSelector{dims, T} | ||
1901 | A::T | ||
1902 | end | ||
1903 | DimSelector{dims}(x::T) where {dims, T} = DimSelector{dims, T}(x) | ||
1904 | (ds::DimSelector{dims, T})(i) where {dims, T} = i in dims ? axes(ds.A, i) : (:,) | ||
1905 | |||
1906 | _negdims(n, dims) = filter(i->!(i in dims), 1:n) | ||
1907 | |||
1908 | function compute_itspace(A, ::Val{dims}) where {dims} | ||
1909 | negdims = _negdims(ndims(A), dims) | ||
1910 | axs = Iterators.product(ntuple(DimSelector{dims}(A), ndims(A))...) | ||
1911 | vec(permutedims(collect(axs), (dims..., negdims...))) | ||
1912 | end | ||
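| # For example (illustration only), for a 3×3 matrix and dims = 1 this yields | ||
| # [(1, :), (2, :), (3, :)], so the slices handed to `sortperm` in `_sortslices` | ||
| # below are the rows `view(A, 1, :)`, `view(A, 2, :)`, `view(A, 3, :)`. | ||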
1913 | |||
1914 | function _sortslices(A::AbstractArray, d::Val{dims}; kws...) where dims | ||
1915 | itspace = compute_itspace(A, d) | ||
1916 | vecs = map(its->view(A, its...), itspace) | ||
1917 | p = sortperm(vecs; kws...) | ||
1918 | if ndims(A) == 2 && isa(dims, Integer) && isa(A, Array) | ||
1919 | # At the moment, the performance of the generic version is subpar | ||
1920 | # (about 5x slower). Hardcode a fast-path until we're able to | ||
1921 | # optimize this. | ||
1922 | return dims == 1 ? A[p, :] : A[:, p] | ||
1923 | else | ||
1924 | B = similar(A) | ||
1925 | for (x, its) in zip(p, itspace) | ||
1926 | B[its...] = vecs[x] | ||
1927 | end | ||
1928 | B | ||
1929 | end | ||
1930 | end | ||
1931 | |||
1932 | getindex(b::Ref, ::CartesianIndex{0}) = getindex(b) | ||
1933 | setindex!(b::Ref, x, ::CartesianIndex{0}) = setindex!(b, x) |