`LUFactorization(pivot=LinearAlgebra.RowMaximum())`

Julia's built-in `lu`. Equivalent to calling `lu!(A)`.

* On dense matrices, this uses the current BLAS implementation of the user's computer,
  which by default is OpenBLAS but will use MKL if the user does `using MKL` on their
  system.
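
A minimal usage sketch, assuming the standard LinearSolve.jl `LinearProblem`/`solve`
interface (the matrix and right-hand side here are illustrative):

```julia
using LinearSolve

A = rand(4, 4)
b = rand(4)

# Dense LU with the default row-maximum pivoting
prob = LinearProblem(A, b)
sol = solve(prob, LUFactorization())
sol.u  # the solution vector
```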
`QRFactorization(pivot=LinearAlgebra.NoPivot(),blocksize=16)`

Julia's built-in `qr`. Equivalent to calling `qr!(A)`.

* On dense matrices, this uses the current BLAS implementation of the user's computer,
  which by default is OpenBLAS but will use MKL if the user does `using MKL` on their
  system.
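
The same `solve` interface applies; a hedged sketch of a case where QR's extra
numerical robustness over LU can matter (the near-singular matrix is illustrative):

```julia
using LinearSolve

# Nearly rank-deficient system, where QR tends to be more robust than LU
A = [1.0 1.0; 1.0 1.0+1e-12]
b = [2.0, 2.0]
sol = solve(LinearProblem(A, b), QRFactorization())
```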
function do_factorization(alg::CholeskyFactorization, A, b, u)
    A = convert(AbstractMatrix, A)
    if A isa SparseMatrixCSC
-        fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm)
+        # fact = cholesky!(A; shift = alg.shift, check = false, perm = alg.perm)
+        # fact = @time cholesky!(A; check = false)
+        fact = cholesky(A; shift = alg.shift, check = false, perm = alg.perm)
    elseif alg.pivot === Val(false) || alg.pivot === NoPivot()
        fact = cholesky!(A, alg.pivot; check = false)
    else
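# For context on the change above, a minimal sketch of the non-mutating CHOLMOD
# path on a sparse SPD matrix. The matrix is illustrative; `shift`, `check`, and
# `perm` are the same keywords forwarded in the call above:
#
#     using SparseArrays, LinearAlgebra
#     A = sparse([2.0 -1.0 0.0; -1.0 2.0 -1.0; 0.0 -1.0 2.0])  # SPD tridiagonal
#     b = ones(3)
#     fact = cholesky(A; shift = 0.0, check = false, perm = nothing)
#     issuccess(fact) || error("A is not positive definite")  # manual check, since check = false
#     u = fact \ b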
@@ -268,6 +270,7 @@ function init_cacheval(alg::CholeskyFactorization, A, b, u, Pl, Pr,
                       maxiters::Int, abstol, reltol, verbose::Bool,
                       assumptions::OperatorAssumptions)
    ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot)
+    # cholesky!(similar(A, 1, 1); check=false)
end

@static if VERSION < v"1.8beta"
`SVDFactorization(full=false,alg=LinearAlgebra.DivideAndConquer())`

Julia's built-in `svd`. Equivalent to `svd!(A)`.

* On dense matrices, this uses the current BLAS implementation of the user's computer,
  which by default is OpenBLAS but will use MKL if the user does `using MKL` on their
  system.
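
A usage sketch under the same assumptions as above (SVD is generally the most
robust, but typically the slowest, of the dense factorizations):

```julia
using LinearSolve

A = rand(4, 4)
b = rand(4)

# Defaults from the docstring: full = false, divide-and-conquer SVD
sol = solve(LinearProblem(A, b), SVDFactorization())
```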
`GenericFactorization(;fact_alg=LinearAlgebra.factorize)`: Constructs a linear solver from a generic
    factorization algorithm `fact_alg` which complies with the `LinearAlgebra`
    factorization API. Quoting from Base:

    * If `A` is upper or lower triangular (or diagonal), no factorization of `A` is
      required. The system is then solved with either forward or backward substitution.
      For non-triangular square matrices, an LU factorization is used.
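
A hedged sketch of wrapping `LinearAlgebra.factorize` (the tridiagonal system is
illustrative; `factorize` should dispatch to a structure-aware method here):

```julia
using LinearSolve, LinearAlgebra

# factorize inspects structure; for a Tridiagonal it picks a specialized LU
A = Tridiagonal(rand(3), rand(4) .+ 2.0, rand(3))
b = rand(4)
sol = solve(LinearProblem(A, b), GenericFactorization(; fact_alg = LinearAlgebra.factorize))
```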
666669"""
667670`UMFPACKFactorization(;reuse_symbolic=true, check_pattern=true)`
668671
669- A fast sparse multithreaded LU-factorization which specializes on sparsity
672+ A fast sparse multithreaded LU-factorization which specializes on sparsity
670673patterns with “more structure”.
671674
672675!!! note
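
A minimal usage sketch (the random sparse matrix is illustrative; the diagonal
shift keeps it comfortably nonsingular):

```julia
using LinearSolve, SparseArrays, LinearAlgebra

n = 100
A = sprand(n, n, 0.05) + 10.0I  # nonsymmetric SparseMatrixCSC{Float64}
b = rand(n)
sol = solve(LinearProblem(A, b), UMFPACKFactorization())
```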
@@ -850,7 +853,7 @@ Only supports sparse matrices.

## Keyword Arguments

* shift: the shift argument in CHOLMOD.
* perm: the perm argument in CHOLMOD.
"""
Base.@kwdef struct CHOLMODFactorization{T} <: AbstractFactorization
@@ -916,12 +919,12 @@ end
## RFLUFactorization

"""
`RFLUFactorization()`

A fast pure Julia LU-factorization implementation
using RecursiveFactorization.jl. This is by far the fastest LU-factorization
implementation, usually outperforming OpenBLAS and MKL for smaller matrices
(<500x500), but currently optimized only for Base `Array` with `Float32` or `Float64`.
Additional optimization for complex matrices is in the works.
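
A usage sketch in the regime described above (small dense `Float64` matrices;
the size is illustrative):

```julia
using LinearSolve

A = rand(200, 200)
b = rand(200)

# Pure-Julia recursive LU; typically fastest below roughly 500x500
sol = solve(LinearProblem(A, b), RFLUFactorization())
```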
926929"""
927930struct RFLUFactorization{P, T} <: AbstractFactorization
@@ -1179,7 +1182,7 @@ end
# But I'm not sure it makes sense as a GenericFactorization
# since it just uses `LAPACK.getrf!`.
"""
`FastLUFactorization()`

The FastLapackInterface.jl version of the LU factorization. Notably,
this version does not allow for a choice of pivoting method.
@@ -1210,7 +1213,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::FastLUFactorization; kwargs..
end

"""
`FastQRFactorization()`

The FastLapackInterface.jl version of the QR factorization.
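
A combined usage sketch for both FastLapackInterface.jl-backed algorithms
(matrix sizes are illustrative; note that `FastLUFactorization` does not expose
a pivoting choice):

```julia
using LinearSolve

A = rand(50, 50)
b = rand(50)

sol_lu = solve(LinearProblem(A, b), FastLUFactorization())
sol_qr = solve(LinearProblem(A, b), FastQRFactorization())
```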
"""