Skip to content

Commit

Permalink
Merge pull request #1043 from gridap/optimisations
Browse files Browse the repository at this point in the history
Optimise allocations
  • Loading branch information
JordiManyer authored Dec 15, 2024
2 parents 3b87738 + e279910 commit 17a4f2e
Show file tree
Hide file tree
Showing 11 changed files with 65 additions and 29 deletions.
5 changes: 5 additions & 0 deletions NEWS.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- Added AMR-related methods `mark` and `estimate` to `Adaptivity` module. Implemented Dorfler marking strategy. Since PR[#1063](https://github.com/gridap/Gridap.jl/pull/1063).

### Changed

- Low level optimisations to reduce allocations. `AffineMap` renamed to `AffineField`. New `AffineMap <: Map`, doing the same as `AffineField` without struct allocation. New `ConstantMap <: Map`, doing the same as `ConstantField` without struct allocation. Since PR[#1043](https://github.com/gridap/Gridap.jl/pull/1043).

## [0.18.8] - 2024-12-2

### Added
Expand All @@ -25,6 +29,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fixed issue where barycentric refinement rule in 3D would not produce oriented meshes. Since PR[#1055](https://github.com/gridap/Gridap.jl/pull/1055).

### Changed

- Optimized MonomialBasis low-level functions. Since PR[#1059](https://github.com/gridap/Gridap.jl/pull/1059).

## [0.18.7] - 2024-10-8
Expand Down
2 changes: 1 addition & 1 deletion src/Adaptivity/RefinementRules.jl
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ end
# - The reason why we are saving both the cell maps and the inverse cell maps is to avoid recomputing
# them when needed. This is needed for performance when the RefinementRule is used for MacroFEs.
# Also, in the case the ref_grid comes from a CartesianGrid, we save the cell maps as
# AffineMaps, which are more efficient than the default linear_combinations.
# AffineFields, which are more efficient than the default linear_combinations.
# - We cannot parametrise the RefinementRule by all its fields, because we will have different types of
# RefinementRules in a single mesh. It's the same reason why we don't parametrise the ReferenceFE type.

Expand Down
49 changes: 35 additions & 14 deletions src/Fields/AffineMaps.jl
Original file line number Diff line number Diff line change
@@ -1,34 +1,47 @@

struct AffineMap <: Map end

function return_cache(::AffineMap,G::TensorValue{D1,D2},y0::Point{D2},x::Point{D1}) where {D1,D2}
nothing
end

function evaluate!(
cache,::AffineMap,G::TensorValue{D1,D2},y0::Point{D2},x::Point{D1}
) where {D1,D2}
x⋅G + y0
end


"""
A Field with this form
y = x⋅G + y0
"""
struct AffineMap{D1,D2,T,L} <:Field
struct AffineField{D1,D2,T,L} <: Field
gradient::TensorValue{D1,D2,T,L}
origin::Point{D2,T}
function AffineMap(
function AffineField(
gradient::TensorValue{D1,D2,T,L},
origin::Point{D2,T}) where {D1,D2,T,L}

new{D1,D2,T,L}(gradient,origin)
end
end

affine_map(gradient,origin) = AffineMap(gradient,origin)
affine_map(gradient,origin) = AffineField(gradient,origin)

function evaluate!(cache,f::AffineMap,x::Point)
function evaluate!(cache,f::AffineField,x::Point)
G = f.gradient
y0 = f.origin
x⋅G + y0
end

function return_cache(f::AffineMap,x::AbstractVector{<:Point})
function return_cache(f::AffineField,x::AbstractVector{<:Point})
T = return_type(f,testitem(x))
y = similar(x,T,size(x))
CachedArray(y)
end

function evaluate!(cache,f::AffineMap,x::AbstractVector{<:Point})
function evaluate!(cache,f::AffineField,x::AbstractVector{<:Point})
setsize!(cache,size(x))
y = cache.array
G = f.gradient
Expand All @@ -41,11 +54,11 @@ function evaluate!(cache,f::AffineMap,x::AbstractVector{<:Point})
y
end

function gradient(h::AffineMap)
function gradient(h::AffineField)
ConstantField(h.gradient)
end

function push_∇∇(∇∇a::Field,ϕ::AffineMap)
function push_∇∇(∇∇a::Field,ϕ::AffineField)
# Assuming ϕ is affine map
Jt = ∇(ϕ)
Jt_inv = pinvJt(Jt)
Expand Down Expand Up @@ -80,7 +93,7 @@ end
function lazy_map(
k::Broadcasting{typeof(push_∇∇)},
cell_∇∇a::AbstractArray,
cell_map::AbstractArray{<:AffineMap})
cell_map::AbstractArray{<:AffineField})
cell_Jt = lazy_map(∇,cell_map)
cell_invJt = lazy_map(Operation(pinvJt),cell_Jt)
lazy_map(Broadcasting(Operation(push_∇∇)),cell_∇∇a,cell_invJt)
Expand All @@ -89,28 +102,36 @@ end
function lazy_map(
k::Broadcasting{typeof(push_∇∇)},
cell_∇∇at::LazyArray{<:Fill{typeof(transpose)}},
cell_map::AbstractArray{<:AffineMap})
cell_map::AbstractArray{<:AffineField})
cell_∇∇a = cell_∇∇at.args[1]
cell_∇∇b = lazy_map(k,cell_∇∇a,cell_map)
cell_∇∇bt = lazy_map(transpose,cell_∇∇b)
cell_∇∇bt
end

function inverse_map(f::AffineMap)
function inverse_map(f::AffineField)
Jt = f.gradient
y0 = f.origin
invJt = pinvJt(Jt)
x0 = -y0⋅invJt
AffineMap(invJt,x0)
AffineField(invJt,x0)
end

function lazy_map(::typeof(∇),a::LazyArray{<:Fill{typeof(affine_map)}})
gradients = a.args[1]
lazy_map(constant_field,gradients)
end

function Base.zero(::Type{<:AffineMap{D1,D2,T}}) where {D1,D2,T}
function lazy_map(
::typeof(evaluate),a::LazyArray{<:Fill{typeof(affine_map)}},x::AbstractArray
)
gradients = a.args[1]
origins = a.args[2]
lazy_map(Broadcasting(AffineMap()),gradients,origins,x)
end

function Base.zero(::Type{<:AffineField{D1,D2,T}}) where {D1,D2,T}
gradient = TensorValue{D1,D2}(tfill(zero(T),Val{D1*D2}()))
origin = Point{D2,T}(tfill(zero(T),Val{D2}()))
AffineMap(gradient,origin)
AffineField(gradient,origin)
end
4 changes: 2 additions & 2 deletions src/Fields/ApplyOptimizations.jl
Original file line number Diff line number Diff line change
Expand Up @@ -135,14 +135,14 @@ function lazy_map(
k::Broadcasting{typeof(∇)}, a::LazyArray{<:Fill{typeof(transpose)}})

i_to_basis = lazy_map(k,a.args[1])
lazy_map( transpose, i_to_basis)
lazy_map(transpose, i_to_basis)
end

function lazy_map(
k::Broadcasting{typeof(∇∇)}, a::LazyArray{<:Fill{typeof(transpose)}})

i_to_basis = lazy_map(k,a.args[1])
lazy_map( transpose, i_to_basis)
lazy_map(transpose, i_to_basis)
end

# Gradient rules
Expand Down
2 changes: 1 addition & 1 deletion src/Fields/Fields.jl
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ export MockFieldArray
export Point
export inverse_map

export AffineMap
export AffineField
export affine_map

export gradient
Expand Down
10 changes: 10 additions & 0 deletions src/Fields/FieldsInterfaces.jl
Original file line number Diff line number Diff line change
Expand Up @@ -288,6 +288,16 @@ function lazy_map(::Operation{typeof(inv)},a::LazyArray{<:Fill{typeof(constant_f
lazy_map(constant_field,vinv)
end

struct ConstantMap <: Map end

return_cache(::ConstantMap,v::Number,x::Point) = nothing
evaluate!(cache,::ConstantMap,v::Number,x::Point) = v

function lazy_map(::typeof(evaluate),a::LazyArray{<:Fill{typeof(constant_field)}},x::AbstractArray)
values = a.args[1]
lazy_map(Broadcasting(ConstantMap()),values,x)
end

## Make Function behave like Field

return_cache(f::FieldGradient{N,<:Function},x::Point) where N = gradient(f.object,Val(N))
Expand Down
6 changes: 3 additions & 3 deletions src/Geometry/CartesianGrids.jl
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,7 @@ end

# Cell map

struct CartesianMap{D,T,L} <: AbstractArray{AffineMap{D,D,T,L},D}
struct CartesianMap{D,T,L} <: AbstractArray{AffineField{D,D,T,L},D}
data::CartesianDescriptor{D,T,typeof(identity)}
function CartesianMap(des::CartesianDescriptor{D,T}) where {D,T}
L = D*D
Expand All @@ -256,9 +256,9 @@ function Base.getindex(a::CartesianMap{D,T},I::Vararg{Integer,D}) where {D,T}
@inbounds for d in 1:D
p[d] = x0[d] + (I[d]-1)*dx[d]
end
origin = Point(p)
origin = Point(p)
grad = diagonal_tensor(VectorValue(dx))
AffineMap(grad,origin)
AffineField(grad,origin)
end

function lazy_map(::typeof(∇),a::CartesianMap)
Expand Down
4 changes: 2 additions & 2 deletions test/FieldsTests/AffineMapsTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ using Test
origin = Point(1,1)
g = TensorValue(2,0,0,2)

h = AffineMap(g,origin)
h = AffineField(g,origin)
@test isa(∇(h),ConstantField)
@test isa(Broadcasting(∇)(h),ConstantField)

Expand Down Expand Up @@ -39,7 +39,7 @@ cell_to_∇hx = lazy_map(evaluate,cell_to_∇h,cell_to_x)
test_array(cell_to_hx,fill(hx,ncells))
test_array(cell_to_∇hx,fill(∇hx,ncells))

T = AffineMap{3,3,Int}
T = AffineField{3,3,Int}
@test isa(zero(T),T)

#display(cell_to_hx)
Expand Down
8 changes: 4 additions & 4 deletions test/FieldsTests/InverseFieldsTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,22 +8,22 @@ using Test

b0 = Point(0,0)
m0 = TensorValue(1,0,0,1)
id = AffineMap(m0,b0)
id = AffineField(m0,b0)

b1 = Point(1,1)
m1 = TensorValue(2,0,0,2)
h1 = AffineMap(m1,b1)
h1 = AffineField(m1,b1)

b2 = Point(3,3)
m2 = TensorValue(4,0,0,4)
h2 = AffineMap(m2,b2)
h2 = AffineField(m2,b2)

h = h2∘h1

# (4x+3) ∘ (2x+1) = 8x+7
b3 = Point(7,7)
m3 = TensorValue(8,0,0,8)
h3 = AffineMap(m3,b3)
h3 = AffineField(m3,b3)

h1inv = inverse_map(h1)
h2inv = inverse_map(h2)
Expand Down
2 changes: 1 addition & 1 deletion test/GeometryTests/BoundaryTriangulationsTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ glue = get_glue(btrian,Val(1))
glue = get_glue(btrian,Val(2))
@test glue.tface_to_mface === btrian.glue.face_to_cell

@test isa(get_cell_map(btrian)[1],AffineMap)
@test isa(get_cell_map(btrian)[1],AffineField)

face_s_q = glue.tface_to_mface_map

Expand Down
2 changes: 1 addition & 1 deletion test/GridapTests/issue_879.jl
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,6 @@ fΓ = interpolate_everywhere(fa, FESpace(Γ,reffe))
# ERROR: LoadError: DimensionMismatch: matrix is not square: dimensions are (1, 2)

# Corrections
# Modified src/Fields/AffineMaps.jl
# Modified src/Fields/AffineFields.jl

end

0 comments on commit 17a4f2e

Please sign in to comment.