|
| 1 | +module ITensorNetworksOMEinsumContractionOrdersExt |
| 2 | +using DocStringExtensions: TYPEDSIGNATURES |
| 3 | +using ITensorNetworks: ITensorNetworks |
| 4 | +using ITensors: ITensors, Index, ITensor, inds |
| 5 | +using NDTensors: dim |
| 6 | +using NDTensors.AlgorithmSelection: @Algorithm_str |
| 7 | +using OMEinsumContractionOrders: OMEinsumContractionOrders |
| 8 | + |
| 9 | +# OMEinsumContractionOrders wrapper for ITensors |
| 10 | +# Slicing is not supported, because it might require extra work to slice an `ITensor` correctly. |
| 11 | + |
# Collections of ITensors accepted as a "tensor network": either a `Vector`
# or a `Tuple` of `ITensor`s.
const ITensorList = Union{Vector{ITensor},Tuple{Vararg{ITensor}}}
| 13 | + |
# Infer the output (uncontracted) index labels of a tensor network:
# an index that appears exactly once is an output index, one that appears
# exactly twice is contracted, and more than twice is an error.
# TODO: Use `symdiff` instead.
function infer_output(inputs::AbstractVector{<:AbstractVector{<:Index}})
  indslist = reduce(vcat, inputs)
  # Count occurrences in a single O(n) pass instead of calling `count`
  # once per element (which made the original loop O(n^2)).
  counts = Dict{eltype(indslist),Int}()
  for l in indslist
    counts[l] = get(counts, l, 0) + 1
  end
  # Collect output indices, preserving their order of appearance.
  iy = eltype(eltype(inputs))[]
  for l in indslist
    c = counts[l]
    if c == 1
      push!(iy, l)
    elseif c != 2
      error("Each index in a tensor network must appear at most twice!")
    end
  end
  return iy
end
| 30 | + |
# Build a (labels, size_dict, index_dict) representation of a collection of
# ITensors, suitable for handing to OMEinsumContractionOrders.
function rawcode(tensors::ITensorList)
  # The `Index` objects themselves serve as the einsum labels.
  labels_per_tensor = [collect(Index{Int}, ITensors.inds(A)) for A in tensors]
  ixs = collect.(inds.(tensors))
  all_labels = unique(reduce(vcat, labels_per_tensor))
  # Map each label to its dimension, and to itself (identity lookup table).
  size_dict = Dict(x => dim(x) for x in all_labels)
  index_dict = Dict(x => x for x in all_labels)
  code = OMEinsumContractionOrders.EinCode(ixs, infer_output(labels_per_tensor))
  return code, size_dict, index_dict
end
| 41 | + |
"""
$(TYPEDSIGNATURES)
Optimize the contraction order of a tensor network specified as a vector of tensors.
Returns an `OMEinsumContractionOrders.NestedEinsum` instance. If the chosen
optimizer produces sliced legs, the slices are stripped (with a warning), since
slicing an `ITensor` is not supported here.
### Examples
```julia
julia> using ITensors

julia> i, j, k, l = Index(4), Index(5), Index(6), Index(7);

julia> x, y, z = randomITensor(i, j), randomITensor(j, k), randomITensor(k, l);

julia> net = optimize_contraction_nested_einsum([x, y, z]);
```
"""
function optimize_contraction_nested_einsum(
  tensors::ITensorList;
  optimizer::OMEinsumContractionOrders.CodeOptimizer=OMEinsumContractionOrders.TreeSA(),
)
  r, size_dict, index_dict = rawcode(tensors)
  # `MergeVectors()` merges vector tensors first, which can speed up
  # contraction order finding; the trailing `true` enables optimizing the
  # permutation of tensors.
  res = OMEinsumContractionOrders.optimize_code(
    r, size_dict, optimizer, OMEinsumContractionOrders.MergeVectors(), true
  )
  if res isa OMEinsumContractionOrders.SlicedEinsum # slicing is not supported!
    if length(res.slicing) != 0
      @warn "Slicing is not yet supported by `ITensors`, removing slices..."
    end
    # Discard the slicing wrapper and keep only the contraction tree.
    res = res.eins
  end
  return res
end
| 72 | + |
"""
Convert a `NestedEinsum` tree to a nested contraction sequence, such as `[[1, 2], [3, 4]]`.
"""
function convert_to_contraction_sequence(net::OMEinsumContractionOrders.NestedEinsum)
  # A leaf node holds the position of a single input tensor; an internal
  # node is converted by recursing into each of its children.
  OMEinsumContractionOrders.isleaf(net) && return net.tensorindex
  return map(convert_to_contraction_sequence, net.args)
end
| 83 | + |
"""
Optimize the contraction order of `tensors` and return it as a nested
contraction sequence, such as `[[1, 2], [3, 4]]`.
"""
function optimize_contraction_sequence(
  tensors::ITensorList;
  # Qualify the default optimizer: `TreeSA` is not imported into this module,
  # so the bare `TreeSA()` default threw an `UndefVarError` whenever the
  # keyword was omitted.
  optimizer::OMEinsumContractionOrders.CodeOptimizer=OMEinsumContractionOrders.TreeSA(),
)
  res = optimize_contraction_nested_einsum(tensors; optimizer)
  return convert_to_contraction_sequence(res)
end
| 93 | + |
"""
    GreedyMethod(; method=MinSpaceOut(), nrepeat=10)

The fast but poor greedy optimizer. Input arguments are:

* `method` is `MinSpaceDiff()` or `MinSpaceOut()`.
  * `MinSpaceOut` chooses a contraction that produces a minimum output tensor size,
  * `MinSpaceDiff` chooses a contraction that decreases the space most.
* `nrepeat` is the number of repetitions; the best contraction order found is returned.
"""
function ITensorNetworks.contraction_sequence(
  ::Algorithm"greedy", tn::Vector{ITensor}; kwargs...
)
  # Forward all keyword arguments to the greedy optimizer's constructor.
  optimizer = OMEinsumContractionOrders.GreedyMethod(; kwargs...)
  return optimize_contraction_sequence(tn; optimizer)
end
| 111 | + |
"""
    TreeSA(; sc_target=20, βs=collect(0.01:0.05:15), ntrials=10, niters=50,
        sc_weight=1.0, rw_weight=0.2, initializer=:greedy, greedy_config=GreedyMethod(; nrepeat=1))

Optimize the einsum contraction pattern using simulated annealing on the tensor expression tree.

* `sc_target` is the target space complexity,
* `ntrials`, `βs` and `niters` are annealing parameters: `ntrials` independent annealings are run, each with inverse temperatures specified by `βs`, and at each temperature `niters` updates of the tree are performed.
* `sc_weight` is the relative importance factor of space complexity in the loss compared with the time complexity.
* `rw_weight` is the relative importance factor of memory read and write in the loss compared with the time complexity.
* `initializer` specifies how to determine the initial configuration; it can be `:greedy` or `:random`. When the `:greedy` method generates the initial configuration, the two extra arguments `greedy_method` and `greedy_nrepeat` are also used.
* `nslices` is the number of sliced legs, default is 0.
* `fixed_slices` is a vector of sliced legs, default is `[]`.

### References
* [Recursive Multi-Tensor Contraction for XEB Verification of Quantum Circuits](https://arxiv.org/abs/2108.05665)
"""
function ITensorNetworks.contraction_sequence(::Algorithm"tree_sa", tn; kwargs...)
  # Forward all keyword arguments to the simulated-annealing optimizer.
  optimizer = OMEinsumContractionOrders.TreeSA(; kwargs...)
  return optimize_contraction_sequence(tn; optimizer)
end
| 134 | + |
"""
    SABipartite(; sc_target=25, ntrials=50, βs=0.1:0.2:15.0, niters=1000
        max_group_size=40, greedy_config=GreedyMethod(), initializer=:random)

Optimize the einsum code contraction order using the Simulated Annealing bipartition + Greedy approach.
This program first recursively cuts the tensors into several groups using simulated annealing,
with maximum group size specified by `max_group_size` and maximum space complexity specified by `sc_target`,
then finds the contraction order inside each group with the greedy search algorithm. Other arguments are:

* `size_dict`, a dictionary that specifies leg dimensions,
* `sc_target` is the target space complexity, defined as `log2(number of elements in the largest tensor)`,
* `max_group_size` is the maximum size for which greedy search is allowed,
* `βs` is a list of inverse temperatures `1/T`,
* `niters` is the number of iterations at each temperature,
* `ntrials` is the number of repetitions (with different random seeds),
* `greedy_config` configures the greedy method,
* `initializer`, the partition configuration initializer; one can choose `:random` or `:greedy` (slow but better).

### References
* [Hyper-optimized tensor network contraction](https://arxiv.org/abs/2002.01935)
"""
function ITensorNetworks.contraction_sequence(::Algorithm"sa_bipartite", tn; kwargs...)
  # Forward all keyword arguments to the bipartition optimizer.
  optimizer = OMEinsumContractionOrders.SABipartite(; kwargs...)
  return optimize_contraction_sequence(tn; optimizer)
end
| 161 | + |
"""
    KaHyParBipartite(; sc_target, imbalances=collect(0.0:0.005:0.8),
        max_group_size=40, greedy_config=GreedyMethod())

Optimize the einsum code contraction order using the KaHyPar + Greedy approach.
This program first recursively cuts the tensors into several groups using KaHyPar,
with maximum group size specified by `max_group_size` and maximum space complexity specified by `sc_target`,
then finds the contraction order inside each group with the greedy search algorithm. Other arguments are:

* `sc_target` is the target space complexity, defined as `log2(number of elements in the largest tensor)`,
* `imbalances` is a KaHyPar parameter that controls the group sizes in hierarchical bipartition,
* `max_group_size` is the maximum size for which greedy search is allowed,
* `greedy_config` is a greedy optimizer.

### References
* [Hyper-optimized tensor network contraction](https://arxiv.org/abs/2002.01935)
* [Simulating the Sycamore quantum supremacy circuits](https://arxiv.org/abs/2103.03074)
"""
function ITensorNetworks.contraction_sequence(::Algorithm"kahypar_bipartite", tn; kwargs...)
  # Forward all keyword arguments to the KaHyPar-based optimizer.
  optimizer = OMEinsumContractionOrders.KaHyParBipartite(; kwargs...)
  return optimize_contraction_sequence(tn; optimizer)
end
| 185 | +end |
0 commit comments