restructure package #4

Merged · 1 commit · Jun 29, 2021

1 change: 1 addition & 0 deletions README.md
@@ -4,6 +4,7 @@
A `Julia` implementation of the Fourier Neural Operator conceived by [Li et al.](https://arxiv.org/abs/2010.08895)
using [Flux.jl](https://github.com/FluxML/Flux.jl) and [FFTW.jl](https://github.com/JuliaMath/FFTW.jl).

The implementation of the layers is heavily influenced by the basic layers provided in the [Flux.jl](https://github.com/FluxML/Flux.jl) package.

## Installation

68 changes: 68 additions & 0 deletions src/FourierLayer.jl
@@ -0,0 +1,68 @@
"""
    FourierLayer(in, out, σ=identity; init=glorot_uniform, bias_linear=true, bias_fourier=true)
    FourierLayer(Wf::AbstractMatrix, Wl::AbstractMatrix, [bias_l, bias_f, σ])

Create a layer of the Fourier Neural Operator as proposed by Li et al.
arXiv: 2010.08895

The input `x` should be a vector of length `in`, a batch of vectors represented
as an `in × N` matrix, or any array with `size(x, 1) == in`.
The output `y` will be a vector of length `out`, or a batch with
`size(y) == (out, size(x)[2:end]...)`.

You can specify biases for both paths as you like, though the convolutional path
was not originally intended to perform an affine transformation.
"""
# Create the data structure
struct FourierLayer{F, Mf<:AbstractMatrix, Ml<:AbstractMatrix, Bf, Bl}
    weight_f::Mf
    weight_l::Ml
    bias_f::Bf
    bias_l::Bl
    σ::F
    # Constructor for the entire Fourier layer
    function FourierLayer(Wf::Mf, Wl::Ml, bias_l = true, bias_f = true,
                          σ::F = identity) where {Mf<:AbstractMatrix, Ml<:AbstractMatrix, F}
        bf = Flux.create_bias(Wf, bias_f, size(Wf, 1))
        bl = Flux.create_bias(Wl, bias_l, size(Wl, 1))
        new{F, Mf, Ml, typeof(bf), typeof(bl)}(Wf, Wl, bf, bl, σ)
    end
end

# Outer constructor that initializes weights and biases for the layer
function FourierLayer(in::Integer, out::Integer, σ = identity;
                      init = Flux.glorot_uniform, bias_linear = true, bias_fourier = true)

    # The Fourier weight acts on the output of `rfft`, which for a real signal
    # of length `in` has length `div(in, 2) + 1`
    Wf = init(div(in, 2) + 1, div(in, 2) + 1)
    Wl = init(out, in)

    return FourierLayer(Wf, Wl, bias_linear, bias_fourier, σ)
end

Flux.@functor FourierLayer

# The forward pass
function (a::FourierLayer)(x::AbstractVecOrMat)
    # Unpack the parameters
    Wf, Wl, bf, bl, σ = a.weight_f, a.weight_l, a.bias_f, a.bias_l, a.σ
    # The linear path
    linear = Wl * x .+ bl
    # The convolution path: transform along the first dimension only, so that
    # each column (sample) of a batch is handled independently
    fourier = irfft(Wf * rfft(x, 1) .+ bf, size(x, 1), 1)
    # Return the activated sum (note: summing the paths requires `out == in`)
    return σ.(linear + fourier)
end

# Overload function to deal with higher-dimensional input arrays
(a::FourierLayer)(x::AbstractArray) = reshape(a(reshape(x, size(x, 1), :)), :, size(x)[2:end]...)

# Print nicely
function Base.show(io::IO, l::FourierLayer)
    print(io, "FourierLayer with\nConvolution path: (", size(l.weight_f, 2), ", ", size(l.weight_f, 1))
    print(io, ")\n")
    print(io, "Linear path: (", size(l.weight_l, 2), ", ", size(l.weight_l, 1))
    print(io, ")\n")
    l.σ == identity || print(io, "Activation: ", l.σ)
end
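
A minimal usage sketch (not part of the diff). It assumes the module is named `NeuralOperator` after its source file and pulls `relu` in from Flux for illustration; everything else follows the constructor and forward pass above. Since the two paths are summed, `in == out` is required:

```julia
using NeuralOperator   # exports FourierLayer
using Flux: relu       # assumed activation for this example

# Construct a layer with 128 inputs and 128 outputs; the linear and the
# spectral path are summed, so `in == out` here
layer = FourierLayer(128, 128, relu)

x = rand(Float32, 128)        # a single sample of length `in`
y = layer(x)                  # a vector of length 128

xb = rand(Float32, 128, 16)   # a batch of 16 samples as columns
yb = layer(xb)                # a 128 × 16 matrix
```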
53 changes: 2 additions & 51 deletions src/NeuralOperator.jl
@@ -4,57 +4,8 @@ using Base: Integer, ident_cmp
using Flux
using FFTW

# Create the data structure
struct FourierLayer{F, Mf<:AbstractMatrix, Ml<:AbstractMatrix, Bf, Bl}
    weight_f::Mf
    weight_l::Ml
    bias_f::Bf
    bias_l::Bl
    σ::F
    # Constructor for the entire fourier layer
    function FourierLayer(Wf::Mf, Wl::Ml, bias_l = true, bias_f = true,
                          σ::F = identity) where {Mf<:AbstractMatrix, Ml<:AbstractMatrix, F}
        bf = Flux.create_bias(Wf, bias_f, size(Wf,1))
        bl = Flux.create_bias(Wl, bias_l, size(Wl, 1))
        new{F,Mf,Ml,typeof(bf),typeof(bl)}(Wf, Wl, bf, bl, σ)
    end
end
export FourierLayer

# Declare the function that assigns Weights and biases to the layer
function FourierLayer(in::Integer, out::Integer, σ = identity;
                      init = Flux.glorot_uniform, bias_linear=true, bias_fourier=true)

    Wf = init(floor(Int, in / 2)+1, floor(Int, in / 2)+1)
    Wl = init(out, in)

    bf = bias_linear
    bl = bias_fourier

    return FourierLayer(Wf, Wl, bf, bl, σ)
end

Flux.@functor FourierLayer

# The actual layer that does stuff
function (a::FourierLayer)(x::AbstractVecOrMat)
    # Assign the parameters
    Wf, Wl, bf, bl, σ = a.weight_f, a.weight_l, a.bias_f, a.bias_l, a.σ
    # The linear path
    linear = Wl * x .+ bl
    # The convolution path
    fourier = irfft((Wf * rfft(x) .+ bf), length(x))
    return σ.(linear + fourier)
end

# What even is this?
(a::FourierLayer)(x::AbstractArray) = reshape(a(reshape(x, size(x, 1), :)), :, size(x)[2:end]...)

# Print nicely
function Base.show(io::IO, l::FourierLayer)
    print(io, "FourierLayer with\nConvolution path: (", size(l.weight_f, 2), ", ", size(l.weight_f, 1))
    print(io, ")\n")
    print(io, "Linear path: (", size(l.weight_l, 2), ", ", size(l.weight_l, 1))
    print(io, ")\n")
    l.σ == identity || print(io, "Activation: ", l.σ)
end
include("FourierLayer.jl")

end # module
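
As a standalone sketch of the shape bookkeeping behind the spectral path (illustrative only; it assumes real-valued `Float32` inputs and mirrors the sizing used by the constructor above):

```julia
using FFTW

# rfft of a real length-n signal keeps only the non-redundant half of the
# spectrum, i.e. div(n, 2) + 1 complex coefficients; this is why the Fourier
# weight is a square matrix with that side length
n  = 128
x  = rand(Float32, n)
X  = rfft(x)                              # length(X) == div(n, 2) + 1 == 65
Wf = rand(Float32, length(X), length(X))  # stand-in for the layer's weight

# Weighting the modes and transforming back yields a real vector of length n,
# matching the convolution path of the layer
y = irfft(Wf * X, n)
length(y) == n                            # true
```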