Commit 16fc41c

Merge #756

756: Change `DepthwiseConv()` to use `in=>out` instead of `in=>mult`. r=MikeInnes a=staticfloat

This is an API change, but I think it makes more sense and is more consistent with our `Conv()` API. This also dumps the `DepthwiseConv((3,3), C_in)` API, as I'm not sure why you would want to specify only the input channel count and default the output to a channel multiplier of 1; if anything, I would think you'd want to specify the channel output and leave the input to be the default. In any case, I think consistency with `Conv()` is the best thing to chase after here.

Co-authored-by: Elliot Saba <staticfloat@gmail.com>
2 parents 5931b93 + 06da965 commit 16fc41c
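
For reference, a sketch of the API change in use (hypothetical usage; assumes Flux at this commit):

```julia
using Flux

x = rand(Float32, 28, 28, 3, 5)  # WHCN: 28×28 images, 3 channels, batch of 5

# Old API: the pair's second element was a channel *multiplier*,
# so 3=>5 produced 3 * 5 = 15 output channels:
#   m = DepthwiseConv((2, 2), 3=>5)

# New API: the pair is in=>out, and `out` must be an integer multiple of `in`:
m = DepthwiseConv((2, 2), 3=>15)
size(m(x), 3)  # 15
```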

4 files changed, with 28 additions and 32 deletions.


NEWS.md

Lines changed: 3 additions & 0 deletions
```diff
@@ -1,3 +1,6 @@
+# v0.9.0
+* [Depthwise convolutional layer API changes](https://github.com/FluxML/Flux.jl/pull/756) from `in => mult` channel specification to `in => out` channel specification, and deprecates implicit `out` constructor.
+
 # v0.8.0
 
 * New [ConvTranspose layer](https://github.com/FluxML/Flux.jl/pull/311).
```

src/layers/conv.jl

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -136,18 +136,17 @@ end
136136
(a::ConvTranspose{<:Any,<:Any,W})(x::AbstractArray{<:Real}) where {T <: Union{Float32,Float64}, W <: AbstractArray{T}} =
137137
a(T.(x))
138138
"""
139-
DepthwiseConv(size, in)
140-
DepthwiseConv(size, in=>mul)
141-
DepthwiseConv(size, in=>mul, relu)
139+
DepthwiseConv(size, in=>out)
140+
DepthwiseConv(size, in=>out, relu)
142141
143142
Depthwise convolutional layer. `size` should be a tuple like `(2, 2)`.
144-
`in` and `mul` specify the number of input channels and channel multiplier respectively.
145-
In case the `mul` is not specified it is taken as 1.
143+
`in` and `out` specify the number of input and output channels respectively.
144+
Note that `out` must be an integer multiple of `in`.
146145
147146
Data should be stored in WHCN order. In other words, a 100×100 RGB image would
148147
be a `100×100×3` array, and a batch of 50 would be a `100×100×3×50` array.
149148
150-
Takes the keyword arguments `pad` and `stride`.
149+
Takes the keyword arguments `pad`, `stride` and `dilation`.
151150
"""
152151
struct DepthwiseConv{N,M,F,A,V}
153152
σ::F
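
A short usage sketch of the documented behavior (assumes Flux at this commit; the shapes follow the docstring's WHCN convention):

```julia
using Flux

img = rand(Float32, 100, 100, 3, 50)  # batch of 50 100×100 RGB images, WHCN order
layer = DepthwiseConv((3, 3), 3=>6, relu; pad = 1, stride = 2)
size(layer(img))  # (50, 50, 6, 50): pad 1 and stride 2 halve width and height
```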
```diff
@@ -166,17 +165,18 @@ function DepthwiseConv(w::AbstractArray{T,N}, b::AbstractVector{T}, σ = identit
   return DepthwiseConv(σ, w, b, stride, pad, dilation)
 end
 
-DepthwiseConv(k::NTuple{N,Integer}, ch::Integer, σ = identity; init = glorot_uniform,
-              stride = 1, pad = 0, dilation = 1) where N =
-  DepthwiseConv(param(init(k..., 1, ch)), param(zeros(ch)), σ,
-                stride = stride, pad = pad, dilation=dilation)
-
-DepthwiseConv(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity; init = glorot_uniform,
-              stride::NTuple{N,Integer} = map(_->1,k),
-              pad::NTuple{N,Integer} = map(_->0,2 .* k),
-              dilation::NTuple{N,Integer} = map(_->1,k)) where N =
-  DepthwiseConv(param(init(k..., ch[2], ch[1])), param(zeros(ch[2]*ch[1])), σ,
-                stride = stride, pad = pad)
+function DepthwiseConv(k::NTuple{N,Integer}, ch::Pair{<:Integer,<:Integer}, σ = identity;
+                       init = glorot_uniform, stride = 1, pad = 0, dilation = 1) where N
+  @assert ch[2] % ch[1] == 0 "Output channels must be integer multiple of input channels"
+  return DepthwiseConv(
+    param(init(k..., div(ch[2], ch[1]), ch[1])),
+    param(zeros(ch[2])),
+    σ;
+    stride = stride,
+    pad = pad,
+    dilation = dilation
+  )
+end
 
 @treelike DepthwiseConv
```
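
The shape arithmetic in the new constructor, spelled out as a sketch (illustrative values; `mult`, `weight_size` and `bias_length` are names introduced here, not part of the API):

```julia
k  = (3, 3)                        # kernel size
ch = 8 => 24                       # in => out, as the new API expects
mult = div(ch[2], ch[1])           # channel multiplier: 24 ÷ 8 = 3
weight_size = (k..., mult, ch[1])  # (3, 3, 3, 8): a mult-deep stack per input channel
bias_length = ch[2]                # 24: one bias per output channel
```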

```diff
@@ -187,8 +187,8 @@ function (c::DepthwiseConv)(x)
 end
 
 function Base.show(io::IO, l::DepthwiseConv)
-  print(io, "DepthwiseConv(", size(l.weight)[1:ndims(l.weight)-2])
-  print(io, ", ", size(l.weight, ndims(l.weight)), "=>", size(l.weight, ndims(l.weight)-1))
+  print(io, "DepthwiseConv(", size(l.weight)[1:end-2])
+  print(io, ", ", size(l.weight)[end], "=>", prod(size(l.weight)[end-1:end]))
   l.σ == identity || print(io, ", ", l.σ)
   print(io, ")")
 end
```
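
The updated `show` recovers `in=>out` from the weight array's trailing dimensions: with the `(k..., mult, in)` layout above, `out == mult * in`. A sketch (illustrative, not Flux API):

```julia
w = rand(Float32, 3, 3, 3, 8)      # weights as laid out for DepthwiseConv((3, 3), 8=>24)
in_ch  = size(w)[end]              # 8
out_ch = prod(size(w)[end-1:end])  # 3 * 8 = 24
# so the layer prints as: DepthwiseConv((3, 3), 8=>24)
```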

test/layers/conv.jl

Lines changed: 6 additions & 12 deletions
```diff
@@ -39,20 +39,14 @@ end
 
 @testset "Depthwise Conv" begin
   r = zeros(Float32, 28, 28, 3, 5)
-  m1 = DepthwiseConv((2, 2), 3=>5)
+  m1 = DepthwiseConv((2, 2), 3=>15)
   @test size(m1(r), 3) == 15
-  m2 = DepthwiseConv((2, 2), 3)
-  @test size(m2(r), 3) == 3
 
-  x = zeros(Float64, 28, 28, 3, 5)
-
-  m3 = DepthwiseConv((2, 2), 3 => 5)
-
-  @test size(m3(r), 3) == 15
-
-  m4 = DepthwiseConv((2, 2), 3)
-
-  @test size(m4(r), 3) == 3
+  m3 = DepthwiseConv((2, 3), 3=>9)
+  @test size(m3(r), 3) == 9
+
+  # Test that we cannot ask for non-integer multiplication factors
+  @test_throws AssertionError DepthwiseConv((2,2), 3=>10)
 end
 
 @testset "ConvTranspose" begin
```

test/layers/normalisation.jl

Lines changed: 0 additions & 1 deletion
```diff
@@ -252,7 +252,6 @@ end
   @test !m.active
 
   x′ = m(x).data
-  println(x′[1])
   @test isapprox(x′[1], (1 - 0.95) / sqrt(1.25 + 1f-5), atol = 1.0e-5)
 end
 # with activation function
```
