
Commit c0621b7

More package extensions (#161)
1 parent 0005598 commit c0621b7

21 files changed: +266 -400 lines

.github/workflows/CI.yml

Lines changed: 0 additions & 1 deletion

@@ -18,7 +18,6 @@ jobs:
       fail-fast: false
       matrix:
         version:
-          - '1.7'
           - '1'
         os:
           - ubuntu-latest

Project.toml

Lines changed: 16 additions & 10 deletions

@@ -1,7 +1,7 @@
 name = "ITensorNetworks"
 uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7"
 authors = ["Matthew Fishman <mfishman@flatironinstitute.org> and contributors"]
-version = "0.9.0"
+version = "0.10.0"
 
 [deps]
 AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
@@ -13,19 +13,16 @@ Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
 DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
 Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
-GraphsFlows = "06909019-6f44-4949-96fc-b9d9aaa02889"
 ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
 IsApprox = "28f27b66-4bd8-47e7-9110-e2746eb8bed7"
 IterTools = "c8e1da08-722c-5040-9ed9-7db0dc04731e"
 KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
 NamedGraphs = "678767b0-92e7-4007-89e4-4527a8725b19"
-Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0"
 PackageExtensionCompat = "65ce6f38-6b18-4e1d-a461-8949797d7930"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
-Requires = "ae029012-a4dd-5104-9daa-d747884805df"
 SerializedElementArrays = "d3ce8812-9567-47e9-a7b5-65a6d70a3065"
 SimpleTraits = "699a6c99-e7fa-54fc-8d76-47d257e15c1d"
 SparseArrayKit = "a9a3c162-d163-4c15-8926-b8794fbefed2"
@@ -38,9 +35,15 @@ TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6"
 
 [weakdeps]
 EinExprs = "b1794770-133b-4de1-afb4-526377e9f4c5"
+GraphsFlows = "06909019-6f44-4949-96fc-b9d9aaa02889"
+Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0"
+OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715"
 
 [extensions]
 ITensorNetworksEinExprsExt = "EinExprs"
+ITensorNetworksGraphsFlowsExt = "GraphsFlows"
+ITensorNetworksObserversExt = "Observers"
+ITensorNetworksOMEinsumContractionOrdersExt = "OMEinsumContractionOrders"
 
 [compat]
 AbstractTrees = "0.4.4"
@@ -50,32 +53,35 @@ DataGraphs = "0.2.2"
 DataStructures = "0.18"
 Dictionaries = "0.4"
 Distributions = "0.25.86"
-DocStringExtensions = "0.8, 0.9"
+DocStringExtensions = "0.9"
 EinExprs = "0.6.4"
 Graphs = "1.8"
 GraphsFlows = "0.1.1"
-ITensors = "0.3.58, 0.4"
+ITensors = "0.4"
 IsApprox = "0.1"
 IterTools = "1.4.0"
 KrylovKit = "0.6, 0.7"
 NamedGraphs = "0.5.1"
-NDTensors = "0.2, 0.3"
+NDTensors = "0.3"
 Observers = "0.2"
+OMEinsumContractionOrders = "0.8.3"
 PackageExtensionCompat = "1"
-Requires = "1.3"
 SerializedElementArrays = "0.1"
 SimpleTraits = "0.9"
-SparseArrayKit = "0.2.1, 0.3"
+SparseArrayKit = "0.3"
 SplitApplyCombine = "1.2"
 StaticArrays = "1.5.12"
 StructWalk = "0.2"
 Suppressor = "0.2"
 TimerOutputs = "0.5.22"
 TupleTools = "1.4"
-julia = "1.7"
+julia = "1.10"
 
 [extras]
 EinExprs = "b1794770-133b-4de1-afb4-526377e9f4c5"
+GraphsFlows = "06909019-6f44-4949-96fc-b9d9aaa02889"
+Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0"
+OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
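
Taken together, these Project.toml changes turn GraphsFlows, Observers, and OMEinsumContractionOrders from hard dependencies into weak dependencies, each backed by a package extension that Julia loads only once the corresponding package is loaded. A minimal sketch of the resulting behavior, using the standard `Base.get_extension` query (Julia >= 1.9):

using ITensorNetworks
# The weak dependency is not loaded yet, so neither is the extension:
Base.get_extension(ITensorNetworks, :ITensorNetworksObserversExt) === nothing  # true

using Observers
# Loading the weak dependency triggers loading of the extension module:
Base.get_extension(ITensorNetworks, :ITensorNetworksObserversExt) isa Module   # true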
ext/ITensorNetworksGraphsFlowsExt.jl

Lines changed: 19 additions & 0 deletions

@@ -0,0 +1,19 @@
+module ITensorNetworksGraphsFlowsExt
+using Graphs: AbstractGraph
+using GraphsFlows: GraphsFlows
+using ITensorNetworks: ITensorNetworks
+using NDTensors.AlgorithmSelection: @Algorithm_str
+
+function ITensorNetworks.mincut(
+  ::Algorithm"GraphsFlows",
+  graph::AbstractGraph,
+  source_vertex,
+  target_vertex;
+  capacity_matrix,
+  alg=GraphsFlows.PushRelabelAlgorithm(),
+)
+  # TODO: Replace with `Backend(backend)`.
+  return GraphsFlows.mincut(graph, source_vertex, target_vertex, capacity_matrix, alg)
+end
+
+end
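
With GraphsFlows loaded alongside ITensorNetworks, the method above becomes reachable via the `Algorithm` dispatch from `NDTensors.AlgorithmSelection`. A hedged sketch on a toy directed path graph (the `(part1, part2, flow)` result layout follows `GraphsFlows.mincut`; the all-ones capacity matrix is made up for illustration):

using Graphs: DiGraph, add_edge!
using GraphsFlows  # loads ITensorNetworksGraphsFlowsExt
using ITensorNetworks: ITensorNetworks
using NDTensors.AlgorithmSelection: Algorithm

g = DiGraph(3)  # directed path 1 -> 2 -> 3
add_edge!(g, 1, 2); add_edge!(g, 2, 3)
# Separate vertex 1 from vertex 3; with unit capacities the cut value is 1.
part1, part2, flow = ITensorNetworks.mincut(
  Algorithm("GraphsFlows"), g, 1, 3; capacity_matrix=ones(3, 3)
)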
ext/ITensorNetworksOMEinsumContractionOrdersExt.jl

Lines changed: 185 additions & 0 deletions

@@ -0,0 +1,185 @@
+module ITensorNetworksOMEinsumContractionOrdersExt
+using DocStringExtensions: TYPEDSIGNATURES
+using ITensorNetworks: ITensorNetworks
+using ITensors: ITensors, Index, ITensor, inds
+using NDTensors: dim
+using NDTensors.AlgorithmSelection: @Algorithm_str
+using OMEinsumContractionOrders: OMEinsumContractionOrders
+
+# OMEinsumContractionOrders wrapper for ITensors.
+# Slicing is not supported, because it might require extra work to slice an `ITensor` correctly.
+
+const ITensorList = Union{Vector{ITensor},Tuple{Vararg{ITensor}}}
+
+# Infer the output tensor labels.
+# TODO: Use `symdiff` instead.
+function infer_output(inputs::AbstractVector{<:AbstractVector{<:Index}})
+  indslist = reduce(vcat, inputs)
+  # The output indices are those that appear exactly once.
+  iy = eltype(eltype(inputs))[]
+  for l in indslist
+    c = count(==(l), indslist)
+    if c == 1
+      push!(iy, l)
+    elseif c != 2
+      error("Each index in a tensor network must appear at most twice!")
+    end
+  end
+  return iy
+end
+
+# Get a (labels, size_dict) representation of a collection of ITensors.
+function rawcode(tensors::ITensorList)
+  # We use the `Index` itself as the label.
+  indsAs = [collect(Index{Int}, ITensors.inds(A)) for A in tensors]
+  ixs = collect.(inds.(tensors))
+  unique_labels = unique(reduce(vcat, indsAs))
+  size_dict = Dict([x => dim(x) for x in unique_labels])
+  index_dict = Dict([x => x for x in unique_labels])
+  return OMEinsumContractionOrders.EinCode(ixs, infer_output(indsAs)), size_dict, index_dict
+end
+
+"""
+$(TYPEDSIGNATURES)
+Optimize the contraction order of a tensor network specified as a vector of tensors.
+Returns an `OMEinsumContractionOrders.NestedEinsum` instance.
+### Examples
+```julia
+julia> using ITensors, ITensorNetworks, OMEinsumContractionOrders
+julia> i, j, k, l = Index(4), Index(5), Index(6), Index(7);
+julia> x, y, z = randomITensor(i, j), randomITensor(j, k), randomITensor(k, l);
+julia> net = optimize_contraction_nested_einsum([x, y, z]; optimizer=OMEinsumContractionOrders.TreeSA());
+```
+"""
+function optimize_contraction_nested_einsum(
+  tensors::ITensorList;
+  optimizer::OMEinsumContractionOrders.CodeOptimizer=OMEinsumContractionOrders.TreeSA(),
+)
+  r, size_dict, index_dict = rawcode(tensors)
+  # `MergeVectors()` merges vector tensors into their neighbors, which can speed up
+  # contraction order finding; the trailing `true` enables optimizing tensor permutations.
+  res = OMEinsumContractionOrders.optimize_code(
+    r, size_dict, optimizer, OMEinsumContractionOrders.MergeVectors(), true
+  )
+  if res isa OMEinsumContractionOrders.SlicedEinsum # Slicing is not supported!
+    if length(res.slicing) != 0
+      @warn "Slicing is not yet supported by `ITensors`, removing slices..."
+    end
+    res = res.eins
+  end
+  return res
+end
+
+"""
+Convert a `NestedEinsum` to a contraction sequence, such as `[[1, 2], [3, 4]]`.
+"""
+function convert_to_contraction_sequence(net::OMEinsumContractionOrders.NestedEinsum)
+  if OMEinsumContractionOrders.isleaf(net)
+    return net.tensorindex
+  else
+    return convert_to_contraction_sequence.(net.args)
+  end
+end
+
+"""
+Convert the result of `optimize_contraction_nested_einsum` to a contraction sequence.
+"""
+function optimize_contraction_sequence(
+  tensors::ITensorList; optimizer::OMEinsumContractionOrders.CodeOptimizer=OMEinsumContractionOrders.TreeSA()
+)
+  res = optimize_contraction_nested_einsum(tensors; optimizer)
+  return convert_to_contraction_sequence(res)
+end
+
+"""
+    GreedyMethod(; method=MinSpaceOut(), nrepeat=10)
+
+A fast but low-quality greedy optimizer. Input arguments are:
+
+* `method` is `MinSpaceDiff()` or `MinSpaceOut()`.
+    * `MinSpaceOut()` chooses the contraction that produces a minimum output tensor size,
+    * `MinSpaceDiff()` chooses the contraction that decreases the space most.
+* `nrepeat` is the number of repetitions; the best contraction order found over the repetitions is returned.
+"""
+function ITensorNetworks.contraction_sequence(
+  ::Algorithm"greedy", tn::Vector{ITensor}; kwargs...
+)
+  return optimize_contraction_sequence(
+    tn; optimizer=OMEinsumContractionOrders.GreedyMethod(; kwargs...)
+  )
+end
+
+"""
+    TreeSA(; sc_target=20, βs=collect(0.01:0.05:15), ntrials=10, niters=50,
+        sc_weight=1.0, rw_weight=0.2, initializer=:greedy, greedy_config=GreedyMethod(; nrepeat=1))
+
+Optimize the einsum contraction pattern using simulated annealing on the tensor expression tree.
+
+* `sc_target` is the target space complexity,
+* `ntrials`, `βs` and `niters` are annealing parameters: run `ntrials` independent annealings, each with inverse temperatures specified by `βs`, doing `niters` updates of the tree at each temperature.
+* `sc_weight` is the relative importance factor of space complexity in the loss, compared with the time complexity.
+* `rw_weight` is the relative importance factor of memory read and write in the loss, compared with the time complexity.
+* `initializer` specifies how to determine the initial configuration; it can be `:greedy` or `:random`. If the `:greedy` method is used to generate the initial configuration, the two extra arguments `greedy_method` and `greedy_nrepeat` are also used.
+* `nslices` is the number of sliced legs; the default is 0.
+* `fixed_slices` is a vector of sliced legs; the default is `[]`.
+
+### References
+* [Recursive Multi-Tensor Contraction for XEB Verification of Quantum Circuits](https://arxiv.org/abs/2108.05665)
+"""
+function ITensorNetworks.contraction_sequence(::Algorithm"tree_sa", tn; kwargs...)
+  return optimize_contraction_sequence(
+    tn; optimizer=OMEinsumContractionOrders.TreeSA(; kwargs...)
+  )
+end
+
+"""
+    SABipartite(; sc_target=25, ntrials=50, βs=0.1:0.2:15.0, niters=1000,
+        max_group_size=40, greedy_config=GreedyMethod(), initializer=:random)
+
+Optimize the einsum code contraction order using a simulated annealing bipartition + greedy approach.
+This program first recursively cuts the tensors into several groups using simulated annealing,
+with the maximum group size specified by `max_group_size` and the maximum space complexity specified by `sc_target`,
+then finds the contraction order inside each group with the greedy search algorithm. Other arguments are:
+
+* `size_dict`, a dictionary that specifies leg dimensions,
+* `sc_target` is the target space complexity, defined as `log2(number of elements in the largest tensor)`,
+* `max_group_size` is the maximum group size allowed in the greedy search,
+* `βs` is a list of inverse temperatures `1/T`,
+* `niters` is the number of iterations at each temperature,
+* `ntrials` is the number of repetitions (with different random seeds),
+* `greedy_config` configures the greedy method,
+* `initializer`, the partition configuration initializer; one can choose `:random` or `:greedy` (slow but better).
+
+### References
+* [Hyper-optimized tensor network contraction](https://arxiv.org/abs/2002.01935)
+"""
+function ITensorNetworks.contraction_sequence(::Algorithm"sa_bipartite", tn; kwargs...)
+  return optimize_contraction_sequence(
+    tn; optimizer=OMEinsumContractionOrders.SABipartite(; kwargs...)
+  )
+end
+
+"""
+    KaHyParBipartite(; sc_target, imbalances=collect(0.0:0.005:0.8),
+        max_group_size=40, greedy_config=GreedyMethod())
+
+Optimize the einsum code contraction order using the KaHyPar + greedy approach.
+This program first recursively cuts the tensors into several groups using KaHyPar,
+with the maximum group size specified by `max_group_size` and the maximum space complexity specified by `sc_target`,
+then finds the contraction order inside each group with the greedy search algorithm. Other arguments are:
+
+* `sc_target` is the target space complexity, defined as `log2(number of elements in the largest tensor)`,
+* `imbalances` is a KaHyPar parameter that controls the group sizes in hierarchical bipartition,
+* `max_group_size` is the maximum group size allowed in the greedy search,
+* `greedy_config` is a greedy optimizer.
+
+### References
+* [Hyper-optimized tensor network contraction](https://arxiv.org/abs/2002.01935)
+* [Simulating the Sycamore quantum supremacy circuits](https://arxiv.org/abs/2103.03074)
+"""
+function ITensorNetworks.contraction_sequence(::Algorithm"kahypar_bipartite", tn; kwargs...)
+  return optimize_contraction_sequence(
+    tn; optimizer=OMEinsumContractionOrders.KaHyParBipartite(; kwargs...)
+  )
+end
+end
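
Once OMEinsumContractionOrders is loaded, these methods hook the four optimizers into `ITensorNetworks.contraction_sequence` through the `Algorithm` string dispatch, so a caller can select an optimizer by name. A hedged sketch (it assumes the usual `contraction_sequence(tn; alg=...)` entry point in ITensorNetworks; the index dimensions are made up):

using ITensors: Index, randomITensor
using ITensorNetworks: contraction_sequence
using OMEinsumContractionOrders  # loads ITensorNetworksOMEinsumContractionOrdersExt

i, j, k = Index(10), Index(20), Index(30)
tn = [randomITensor(i, j), randomITensor(j, k), randomITensor(k, i)]
# Returns a nested sequence such as [[1, 2], 3]: contract tensors 1 and 2 first, then 3.
seq = contraction_sequence(tn; alg="tree_sa")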
ext/ITensorNetworksObserversExt.jl

Lines changed: 9 additions & 0 deletions

@@ -0,0 +1,9 @@
+module ITensorNetworksObserversExt
+using ITensorNetworks: ITensorNetworks
+using Observers.DataFrames: AbstractDataFrame
+using Observers: Observers
+
+function ITensorNetworks.update_observer!(observer::AbstractDataFrame; kwargs...)
+  return Observers.update!(observer; kwargs...)
+end
+end
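
Observers.jl observers are `DataFrame`s whose column functions are called with matching keyword arguments, so this single method is enough to route `update_observer!` calls into `Observers.update!`. A minimal sketch, assuming the pair-based `observer` constructor from Observers.jl (the column functions here are hypothetical):

using ITensorNetworks: ITensorNetworks
using Observers: observer

# Hypothetical column functions; each records the keyword it is named for.
record_sweep(; sweep, kwargs...) = sweep
record_energy(; energy, kwargs...) = energy
obs = observer("sweep" => record_sweep, "energy" => record_energy)

# What a solver would effectively call on each iteration:
ITensorNetworks.update_observer!(obs; sweep=1, energy=-1.25)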

src/ITensorNetworks.jl

Lines changed: 1 addition & 5 deletions

@@ -1,7 +1,6 @@
 module ITensorNetworks
 include("lib/BaseExtensions/src/BaseExtensions.jl")
 include("lib/ITensorsExtensions/src/ITensorsExtensions.jl")
-include("observers.jl")
 include("visualize.jl")
 include("graphs.jl")
 include("abstractindsnetwork.jl")
@@ -33,6 +32,7 @@ include("caches/beliefpropagationcache.jl")
 include("contraction_tree_to_graph.jl")
 include("gauging.jl")
 include("utils.jl")
+include("update_observer.jl")
 include("solvers/local_solvers/eigsolve.jl")
 include("solvers/local_solvers/exponentiate.jl")
 include("solvers/local_solvers/dmrg_x.jl")
@@ -66,11 +66,7 @@ include("lib/ModelHamiltonians/src/ModelHamiltonians.jl")
 include("lib/ModelNetworks/src/ModelNetworks.jl")
 
 using PackageExtensionCompat: @require_extensions
-using Requires: @require
 function __init__()
   @require_extensions
-  @require OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715" include(
-    "requires/omeinsumcontractionorders.jl"
-  )
 end
 end
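
For the Observers extension to have a method to extend, the new `src/update_observer.jl` presumably defines a generic `update_observer!` in the main package. Its contents are not shown in this commit, so the following is only a hypothetical sketch of such a dispatch point:

# Hypothetical sketch (not from this diff): a fallback that extensions such as
# ITensorNetworksObserversExt specialize for their own observer types.
function update_observer!(observer; kwargs...)
  return observer
end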

src/apply.jl

Lines changed: 0 additions & 1 deletion

@@ -27,7 +27,6 @@ using ITensors.ITensorMPS: siteinds
 using KrylovKit: linsolve
 using LinearAlgebra: eigen, norm, svd
 using NamedGraphs: NamedEdge, has_edge
-using Observers: Observers
 
 function full_update_bp(
   o,

src/contract_approx/mincut.jl

Lines changed: 14 additions & 2 deletions

@@ -1,8 +1,8 @@
 using AbstractTrees: Leaves, PostOrderDFS
 using Combinatorics: powerset
 using Graphs: dijkstra_shortest_paths, weights
-using GraphsFlows: GraphsFlows
 using NamedGraphs: NamedDiGraph
+using NDTensors.AlgorithmSelection: Algorithm
 
 # a large number to prevent this edge being a cut
 MAX_WEIGHT = 1e32
@@ -37,6 +37,18 @@ function binary_tree_structure(tn::ITensorNetwork, outinds::Vector)
   return _binary_tree_structure(tn, outinds; maximally_unbalanced=false)
 end
 
+function mincut(graph::AbstractGraph, source_vertex, target_vertex; backend, kwargs...)
+  # TODO: Replace with `Backend(backend)`.
+  return mincut(Algorithm(backend), graph, source_vertex, target_vertex; kwargs...)
+end
+
+# TODO: Replace with `backend::Backend`.
+function mincut(
+  backend::Algorithm, graph::AbstractGraph, source_vertex, target_vertex; kwargs...
+)
+  return error("Backend `$backend` not implemented for `mincut`.")
+end
+
 """
 Calculate the mincut between two subsets of the uncontracted inds
 (source_inds and terminal_inds) of the input tn.
@@ -52,7 +64,7 @@ function _mincut(tn::ITensorNetwork, source_inds::Vector, terminal_inds::Vector)
   tn = disjoint_union(
     ITensorNetwork([ITensor(source_inds...), ITensor(terminal_inds...)]), tn
   )
-  return GraphsFlows.mincut(tn, (1, 1), (2, 1), weights(tn))
+  return mincut(tn, (1, 1), (2, 1); backend="GraphsFlows", capacity_matrix=weights(tn))
 end
 
 """
