(a::DepthwiseConv{<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  a(T.(x))
| 201 | +""" |
| 202 | + CrossCor(size, in=>out) |
| 203 | + CrossCor(size, in=>out, relu) |
| 204 | + |
Standard cross-correlation layer. `size` should be a tuple like `(2, 2)`.
`in` and `out` specify the number of input and output channels respectively.

Example: applying a CrossCor layer to a 1-channel input with a 2×2 window,
giving a 16-channel output activated with `relu`:

    size = (2, 2)
    in = 1
    out = 16
    CrossCor(size, in=>out, relu)

Data should be stored in WHCN order (width, height, # channels, # batches).
In other words, a 100×100 RGB image would be a `100×100×3×1` array,
and a batch of 50 would be a `100×100×3×50` array.
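
For example (an illustrative sketch; the data is just random), a
`CrossCor((2, 2), 3=>16, relu)` layer maps such a batch of 50 RGB images to a
`99×99×16×50` output:

    layer = CrossCor((2, 2), 3=>16, relu)
    x = rand(Float32, 100, 100, 3, 50)  # 50 RGB images in WHCN order
    size(layer(x))                      # (99, 99, 16, 50)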

Takes the keyword arguments `pad`, `stride` and `dilation`.
"""
struct CrossCor{N,M,F,A,V}
  σ::F
  weight::A
  bias::V
  stride::NTuple{N,Int}
  pad::NTuple{M,Int}
  dilation::NTuple{N,Int}
end

# `expand` turns scalar stride/pad/dilation arguments into tuples over the spatial
# dimensions (pad gets two entries per spatial dimension, hence 2*(N-2)).
function CrossCor(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identity;
                  stride = 1, pad = 0, dilation = 1) where {T,N}
  stride = expand(Val(N-2), stride)
  pad = expand(Val(2*(N-2)), pad)
  dilation = expand(Val(N-2), dilation)
  return CrossCor(σ, w, b, stride, pad, dilation)
end

CrossCor(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity;
         init = glorot_uniform, stride = 1, pad = 0, dilation = 1) where N =
  CrossCor(param(init(k..., ch...)), param(zeros(ch[2])), σ,
           stride = stride, pad = pad, dilation = dilation)

@treelike CrossCor

function crosscor(x, w, ddims::DenseConvDims)
  ddims = DenseConvDims(ddims, F=true)
  return conv(x, w, ddims)
end
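
# `F = true` is NNlib's flipkernel flag: `conv` then applies the kernel as-is instead
# of flipping it, which is exactly cross-correlation. For a 1-D input [1, 2, 3] and
# kernel [a, b], cross-correlation gives [1a + 2b, 2a + 3b], whereas flipped (true)
# convolution gives [1b + 2a, 2b + 3a].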

function (c::CrossCor)(x::AbstractArray)
  # TODO: breaks gpu broadcast :(
  # ndims(x) == ndims(c.weight)-1 && return squeezebatch(c(reshape(x, size(x)..., 1)))
  # reshape the bias so it broadcasts along the channel dimension of the output
  σ, b = c.σ, reshape(c.bias, map(_->1, c.stride)..., :, 1)
  cdims = DenseConvDims(x, c.weight; stride=c.stride, padding=c.pad, dilation=c.dilation)
  σ.(crosscor(x, c.weight, cdims) .+ b)
end

function Base.show(io::IO, l::CrossCor)
  print(io, "CrossCor(", size(l.weight)[1:ndims(l.weight)-2])
  print(io, ", ", size(l.weight, ndims(l.weight)-1), "=>", size(l.weight, ndims(l.weight)))
  l.σ == identity || print(io, ", ", l.σ)
  print(io, ")")
end

# If the input eltype already matches the Float32/Float64 weight eltype, use the
# generic method directly; otherwise convert the input to the weight's eltype first.
(a::CrossCor{<:Any,<:Any,W})(x::AbstractArray{T}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  invoke(a, Tuple{AbstractArray}, x)

(a::CrossCor{<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
  a(T.(x))
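
# A brief sketch of how the keyword arguments affect the output shape (illustrative
# values): with a 3×3 window on a 32×32 input, `pad = 1` preserves the spatial size
# and adding `stride = 2` halves it.
#
#   x = rand(Float32, 32, 32, 3, 1)
#   size(CrossCor((3, 3), 3=>8, pad = 1)(x))              # (32, 32, 8, 1)
#   size(CrossCor((3, 3), 3=>8, pad = 1, stride = 2)(x))  # (16, 16, 8, 1)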

"""
    MaxPool(k)