diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md index e125c19d..93a72045 100644 --- a/.claude/CLAUDE.md +++ b/.claude/CLAUDE.md @@ -162,6 +162,7 @@ impl ReduceTo for Source { ... } - Expression strings are parsed at compile time by a Pratt parser in the proc macro crate - Variable names are validated against actual getter methods on the source type — typos cause compile errors - Each problem type provides inherent getter methods (e.g., `num_vertices()`, `num_edges()`) that the overhead expressions reference +- **Overhead expressions describe scaling (asymptotic upper bounds), not exact sizes.** To determine the actual target problem size for a specific instance, read the `reduce_to()` construction code and count the actual variables/constraints/vertices built. - `ReductionOverhead` stores `Vec<(&'static str, Expr)>` — field name to symbolic expression mappings - `ReductionEntry` has both symbolic (`overhead_fn`) and compiled (`overhead_eval_fn`) evaluation — the compiled version calls getters directly - `VariantEntry` has both a complexity string and compiled `complexity_eval_fn` — same pattern @@ -212,6 +213,8 @@ Reduction graph nodes use variant key-value pairs from `Problem::variant()`: ## Testing Requirements +**No single test should take more than 5 seconds.** If a test requires solving a large instance (e.g., ILP with thousands of variables), use a smaller test instance or a faster solver. Tests that exceed 5s block CI and must be refactored. 
+ **Reference implementations — read these first:** - **Reduction test:** `src/unit_tests/rules/minimumvertexcover_maximumindependentset.rs` — closed-loop pattern - **Model test:** `src/unit_tests/models/graph/maximum_independent_set.rs` — evaluation, serialization diff --git a/docs/paper/reductions.typ b/docs/paper/reductions.typ index 85115827..3cdd48fa 100644 --- a/docs/paper/reductions.typ +++ b/docs/paper/reductions.typ @@ -244,6 +244,7 @@ "MinimumDisjunctiveNormalForm": [Minimum Disjunctive Normal Form], "MinimumGraphBandwidth": [Minimum Graph Bandwidth], "MinimumMetricDimension": [Minimum Metric Dimension], + "DecisionMinimumDominatingSet": [Decision Minimum Dominating Set], "DecisionMinimumVertexCover": [Decision Minimum Vertex Cover], "MinimumCodeGenerationUnlimitedRegisters": [Minimum Code Generation (Unlimited Registers)], "RegisterSufficiency": [Register Sufficiency], @@ -3774,9 +3775,9 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], #{ let x = load-model-example("QuadraticDiophantineEquations") - let a = x.instance.a - let b = x.instance.b - let c = x.instance.c + let a = int(x.instance.a) + let b = int(x.instance.b) + let c = int(x.instance.c) let config = x.optimal_config let xval = config.at(0) + 1 let yval = int((c - a * xval * xval) / b) @@ -6633,11 +6634,11 @@ A classical NP-complete problem from Garey and Johnson @garey1979[Ch.~3, p.~76], let count = sel-pairs.len() [ #problem-def("MinimumFaultDetectionTestSet")[ - Given a directed acyclic graph $G = (V, A)$ with $n = |V|$ vertices, designated input vertices $I subset.eq V$, and designated output vertices $O subset.eq V$, find the minimum number of input-output pairs $(i, o) in I times O$ such that the union of their coverage sets covers all vertices $V$. For a pair $(i, o)$, the coverage set is the set of vertices reachable from $i$ that can also reach $o$. 
+ Given a directed acyclic graph $G = (V, A)$ with $n = |V|$ vertices, designated input vertices $I subset.eq V$, and designated output vertices $O subset.eq V$, find the minimum number of input-output pairs $(i, o) in I times O$ such that the union of their coverage sets covers every internal vertex in $V minus (I union O)$. For a pair $(i, o)$, the coverage set is the set of vertices reachable from $i$ that can also reach $o$. ][ - Fault detection test sets arise in hardware testing: each input-output path through a circuit's DAG representation can detect faults at the vertices it traverses, and the goal is to find the fewest test paths that collectively exercise every component. The problem generalises Set Cover over a structured family of subsets induced by DAG reachability.#footnote[No algorithm improving on brute-force enumeration of all $2^(|I| dot |O|)$ input-output pair subsets is known for the general case.] + Fault detection test sets arise in hardware testing: each input-output path through a circuit's DAG representation exercises the internal components it traverses, while the boundary pins themselves are fixed sources and sinks. The problem therefore asks for the fewest test pairs whose induced paths cover all internal vertices. It generalises Set Cover over a structured family of subsets induced by DAG reachability.#footnote[No algorithm improving on brute-force enumeration of all $2^(|I| dot |O|)$ input-output pair subsets is known for the general case.] - *Example.* Consider $n = #n$ vertices with inputs $I = {#inputs.map(str).join(", ")}$ and outputs $O = {#outputs.map(str).join(", ")}$. Arcs: #{arcs.map(a => $#(a.at(0)) arrow.r #(a.at(1))$).join(", ")}. Selecting pair $(#(inputs.at(0)), #(outputs.at(0)))$ covers ${0, 2, 3, 5}$, and pair $(#(inputs.at(1)), #(outputs.at(1)))$ covers ${1, 3, 4, 6}$. Their union is all $#n$ vertices, giving an optimal count of $#count$. 
+ *Example.* Consider $n = #n$ vertices with inputs $I = {#inputs.map(str).join(", ")}$ and outputs $O = {#outputs.map(str).join(", ")}$. The internal vertices are ${2, 3, 4}$. Arcs: #{arcs.map(a => $#(a.at(0)) arrow.r #(a.at(1))$).join(", ")}. Selecting pair $(#(inputs.at(0)), #(outputs.at(0)))$ covers internal vertices ${2, 3}$, and pair $(#(inputs.at(1)), #(outputs.at(1)))$ covers internal vertices ${3, 4}$. Their union is all internal vertices, giving an optimal count of $#count$. #pred-commands( "pred create --example MinimumFaultDetectionTestSet -o mfdts.json", @@ -9237,6 +9238,78 @@ Each reduction is presented as a *Rule* (with linked problem names and overhead _Solution extraction._ For IS solution $S$, return $C = V backslash S$, i.e.\ flip each variable: $c_v = 1 - s_v$. ] +#let dmds_mmmc = load-example( + "DecisionMinimumDominatingSet", + "MinMaxMulticenter", + source-variant: (graph: "SimpleGraph", weight: "One"), + target-variant: (graph: "SimpleGraph", weight: "One"), +) +#let dmds_mmmc_sol = dmds_mmmc.solutions.at(0) +#reduction-rule("DecisionMinimumDominatingSet", "MinMaxMulticenter", + example: true, + example-source-variant: (graph: "SimpleGraph", weight: "One"), + example-target-variant: (graph: "SimpleGraph", weight: "One"), + example-caption: [6-vertex unit graph: dominating set of size 2 equals a 2-center of radius 1], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(dmds_mmmc.source) + " -o dmds.json", + "pred reduce dmds.json --to " + target-spec(dmds_mmmc) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate dmds.json --config " + dmds_mmmc_sol.source_config.map(str).join(","), + ) + *Step 1 -- Source instance.* The source graph has vertices ${0, 1, 2, 3, 4, 5}$, edges #{dmds_mmmc.source.instance.inner.graph.edges.map(e => $(#e.at(0), #e.at(1))$).join(", ")}, and bound $K = #dmds_mmmc.source.instance.bound$. 
The stored dominating-set witness is $D = {#dmds_mmmc_sol.source_config.enumerate().filter(((i, x)) => x == 1).map(((i, _)) => str(i)).join(", ")}$. + + *Step 2 -- Build the target instance.* Keep the graph unchanged, assign weight $1$ to every vertex, assign length $1$ to every edge, and set the number of centers to $k = #dmds_mmmc.target.instance.k$. The target therefore still has $#graph-num-vertices(dmds_mmmc.target.instance)$ vertices and $#graph-num-edges(dmds_mmmc.target.instance)$ edges. + + *Step 3 -- Verify a witness.* Choosing centers $P = {#dmds_mmmc_sol.target_config.enumerate().filter(((i, x)) => x == 1).map(((i, _)) => str(i)).join(", ")}$ yields distances $(0, 1, 1, 0, 1, 1)$ to the nearest center, so the maximum weighted distance is $1$. The extracted source witness is the same indicator vector, hence a dominating set of size $2$ #sym.checkmark + ], +)[ + This $O(n + m)$ parameter-setting reduction @garey1979[ND50] keeps the graph unchanged, replaces all vertex weights and edge lengths by $1$, and copies the decision budget $K$ into the target center count $k$. On such unit graphs, a $k$-center solution of radius at most $1$ exists exactly when every vertex is itself chosen or adjacent to a chosen vertex, which is the dominating-set condition. +][ + _Construction._ Given a unit-weight decision dominating-set instance $(G = (V, E), K)$, build a Min-Max Multicenter instance on the same graph $G$. Set $w(v) = 1$ for every vertex, set $l(e) = 1$ for every edge, and set the number of centers to $k = K$. + + _Correctness._ ($arrow.r.double$) If $D subset.eq V$ is a dominating set with $|D| <= K$, pad $D$ with arbitrary additional vertices until exactly $K$ centers are chosen. Every vertex is then either a center (distance $0$) or adjacent to one (distance $1$), so the target maximum weighted distance is at most $1$. 
($arrow.l.double$) If a set $P subset.eq V$ of exactly $K$ centers has maximum weighted distance at most $1$, then every vertex lies at graph distance $0$ or $1$ from some vertex of $P$. Hence every vertex is either in $P$ or adjacent to a vertex of $P$, so $P$ is a dominating set of size $K$. + + _Solution extraction._ Return the same indicator vector: every chosen target center becomes a chosen source dominating-set vertex. +] + +#let dmds_msmc = load-example( + "DecisionMinimumDominatingSet", + "MinimumSumMulticenter", + source-variant: (graph: "SimpleGraph", weight: "One"), + target-variant: (graph: "SimpleGraph", weight: "i32"), +) +#let dmds_msmc_sol = dmds_msmc.solutions.at(0) +#reduction-rule("DecisionMinimumDominatingSet", "MinimumSumMulticenter", + example: true, + example-source-variant: (graph: "SimpleGraph", weight: "One"), + example-target-variant: (graph: "SimpleGraph", weight: "i32"), + example-caption: [6-vertex unit graph: dominating set of size 2 gives total distance 4], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(dmds_msmc.source) + " -o dmds.json", + "pred reduce dmds.json --to " + target-spec(dmds_msmc) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate dmds.json --config " + dmds_msmc_sol.source_config.map(str).join(","), + ) + *Step 1 -- Source instance.* The source graph has vertices ${0, 1, 2, 3, 4, 5}$, edges #{dmds_msmc.source.instance.inner.graph.edges.map(e => $(#e.at(0), #e.at(1))$).join(", ")}, and decision bound $K = #dmds_msmc.source.instance.bound$. The stored dominating-set witness is $D = {#dmds_msmc_sol.source_config.enumerate().filter(((i, x)) => x == 1).map(((i, _)) => str(i)).join(", ")}$. + + *Step 2 -- Build the target instance.* Keep the graph unchanged, assign vertex weight $1$ everywhere, assign edge length $1$ everywhere, and set the target center count to $k = #dmds_msmc.target.instance.k$. The comparison threshold is $B = |V| - K = 6 - 2 = 4$. 
+ + *Step 3 -- Verify a witness.* Choosing centers $P = {#dmds_msmc_sol.target_config.enumerate().filter(((i, x)) => x == 1).map(((i, _)) => str(i)).join(", ")}$ yields distances $(0, 1, 1, 0, 1, 1)$ to the nearest center, so the total weighted distance is $4 = B$. The extracted source witness is the same indicator vector, hence a valid YES witness for the original decision instance #sym.checkmark + ], +)[ + This $O(n + m)$ parameter-setting reduction @garey1979[ND51] keeps the graph unchanged, sets every vertex weight and edge length to $1$, copies the decision budget $K$ into the target center count $k$, and compares the target optimum against $B = |V| - K$. On such unit graphs, every exact-$K$ center placement has total distance at least $n - K$, with equality exactly when every non-center vertex is adjacent to a center. +][ + _Construction._ Given a decision dominating-set instance $(G = (V, E), K)$ with unit vertex weights, build a Minimum Sum Multicenter instance on the same graph $G$. Set $w(v) = 1$ for every vertex, set $l(e) = 1$ for every edge, and set the number of centers to $k = K$. Let $n = |V|$, and define the decision threshold $B = n - K$ for the target optimum. + + _Correctness._ ($arrow.r.double$) If $D subset.eq V$ is a dominating set with $|D| <= K$, pad $D$ with arbitrary additional vertices until exactly $K$ centers are chosen. Every chosen center contributes distance $0$, and every non-center vertex is adjacent to at least one chosen center, so every non-center contributes distance $1$. Hence the total weighted distance is exactly $n - K = B$. + + ($arrow.l.double$) Suppose a set $P subset.eq V$ of exactly $K$ centers has total weighted distance at most $B = n - K$. Every non-center vertex has distance at least $1$ from $P$, so any exact-$K$ center placement has total distance at least $n - K$. Therefore total distance at most $n - K$ forces equality, meaning every non-center contributes exactly $1$. 
Thus every non-center vertex is adjacent to some center in $P$, so $P$ is a dominating set of size $K$. + + _Solution extraction._ Return the same indicator vector: every chosen target center becomes a chosen source dominating-set vertex. +] + #let mvc_mmm = load-example("MinimumVertexCover", "MinimumMaximalMatching") #let mvc_mmm_sol = mvc_mmm.solutions.at(0) #reduction-rule("MinimumVertexCover", "MinimumMaximalMatching", @@ -9432,6 +9505,40 @@ Each reduction is presented as a *Rule* (with linked problem names and overhead _Solution extraction._ From an optimal witness, collect all vertices appearing as singleton operands (indices $< |V|$). In a minimum-length normalized sequence, exactly the $K^*$ cover vertices appear as ${a_0}$-paired singletons. ] +#let mvc_aog = load-example("MinimumVertexCover", "MinimumWeightAndOrGraph") +#let mvc_aog_sol = mvc_aog.solutions.at(0) +#let mvc_aog_cover = mvc_aog_sol.source_config.enumerate().filter(((i, x)) => x == 1).map(((i, x)) => i) +#reduction-rule("MinimumVertexCover", "MinimumWeightAndOrGraph", + example: true, + example-caption: [Path $P_3$: vertex cover ${1}$ maps to an AND/OR graph of weight 5], + extra: [ + #pred-commands( + "pred create --example MVC -o mvc.json", + "pred reduce mvc.json --to " + target-spec(mvc_aog) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate mvc.json --config " + mvc_aog_sol.source_config.map(str).join(","), + ) + Source VC: $C = {#mvc_aog_cover.map(str).join(", ")}$ (size #mvc_aog_cover.len()) on graph with $n = #graph-num-vertices(mvc_aog.source.instance)$ vertices, $m = #graph-num-edges(mvc_aog.source.instance)$ edges \ + Target AND/OR graph: #mvc_aog.target.instance.num_vertices vertices, source $v_#mvc_aog.target.instance.source$ (AND), arcs: #{range(mvc_aog.target.instance.arcs.len()).map(i => {let a = mvc_aog.target.instance.arcs.at(i); $v_#(a.at(0)) arrow.r v_#(a.at(1))$}).join(", ")} \ + Selected arcs: #{mvc_aog_sol.target_config.enumerate().filter(((i, x)) => x == 
1).map(((i, _)) => {let a = mvc_aog.target.instance.arcs.at(i); $v_#(a.at(0)) arrow.r v_#(a.at(1))$}).join(", ")} (weight #{mvc_aog_sol.target_config.enumerate().filter(((i, x)) => x == 1).map(((i, _)) => mvc_aog.target.instance.arc_weights.at(i)).sum()}) #sym.checkmark + ], +)[ + This reduction encodes vertex cover as a minimum-weight solution subgraph problem on a three-layer AND/OR DAG. The root AND gate requires all edges to be covered; each edge becomes an OR gate selecting which endpoint covers it; and each vertex becomes a sink whose arc weight equals the vertex weight. The minimum-weight solution subgraph selects exactly the arcs corresponding to a minimum vertex cover. +][ + _Construction._ Given a Minimum Vertex Cover instance $(G = (V, E), bold(w))$ with $n = |V|$ vertices and $m = |E|$ edges, build an AND/OR graph $D$ with $1 + m + 2n$ vertices arranged in three layers: + + - *Root (AND gate):* A single vertex $r$ (index 0) with gate type AND. + - *Edge layer (OR gates):* For each edge $e_i = {u, v}$ ($i = 0, dots, m-1$), create vertex $e_i$ (index $1 + i$) with gate type OR and an arc $(r, e_i)$ of weight 1. + - *Cover layer (OR gates):* For each source vertex $v_j$ ($j = 0, dots, n-1$), create vertex $c_j$ (index $1 + m + j$) with gate type OR. For each edge $e_i = {u, v}$, add arcs $(e_i, c_u)$ and $(e_i, c_v)$, each of weight 1. + - *Sink layer (leaves):* For each source vertex $v_j$, create leaf $s_j$ (index $1 + m + n + j$) and arc $(c_j, s_j)$ with weight $w_j$. + + Since $r$ is AND, any solution subgraph must include all arcs from $r$ to the edge-layer vertices (cost $m$). Each edge-OR vertex $e_i$ requires at least one of its two outgoing arcs to $c_u$ and $c_v$ (selecting which endpoint covers edge $i$). Each activated cover vertex $c_j$ requires its outgoing arc to $s_j$ (contributing $w_j$). The total weight is $m + |{"activated cover arcs"}| + sum_(j in C) w_j$. 
+ + _Correctness._ ($arrow.r.double$) If $C subset.eq V$ is a vertex cover with weight $W$, then for each edge $e_i = {u, v}$, at least one endpoint lies in $C$; select the arc from $e_i$ to that endpoint's cover vertex. Activate all cover-to-sink arcs for vertices in $C$. This satisfies the AND gate at the root (all edge arcs selected), every edge OR gate (at least one child selected), and all activated cover vertices (sink arc selected). The total weight is $m + |{"edge-to-cover arcs"}| + W$. ($arrow.l.double$) In any valid solution subgraph, the AND root forces all $m$ edge arcs. Each edge OR vertex selects at least one arc to a cover vertex, activating that cover vertex and its sink arc. The set of activated cover vertices forms a vertex cover (every edge has at least one endpoint activated). The sink arc weights sum to the cover weight, so any minimum-weight solution subgraph corresponds to a minimum vertex cover. + + _Solution extraction._ Examine the cover-to-sink arcs (indices $3m, dots, 3m + n - 1$ in the arc list): $c_j = 1$ if arc $(c_j, s_j)$ is selected, $c_j = 0$ otherwise. +] + #reduction-rule("MaximumMatching", "MaximumSetPacking")[ A matching selects edges that share no endpoints; set packing selects sets that share no elements. By representing each edge as the 2-element set of its endpoints and using vertices as the universe, two edges conflict (share an endpoint) if and only if their sets overlap. This embeds matching as a special case of set packing where every set has size exactly 2. ][ @@ -9701,6 +9808,20 @@ where $P$ is a penalty weight large enough that any constraint violation costs m _Solution extraction._ Recover each sign $alpha_j$ from the divisibility of $H - x$ and $H + x$ by $p_j^(N+1)$. For variable coordinates $j = 2M+i$, interpret $alpha_j = -1$ as $x_i = 1$ and $alpha_j = +1$ as $x_i = 0$. 
] +#reduction-rule("KSatisfiability", "QuadraticDiophantineEquations")[ + This reduction chains through the Manders--Adleman quadratic congruence construction. Given a 3-SAT instance $phi$, first reduce to a Quadratic Congruences instance $(a, b, c)$ with $x^2 equiv a mod b$ and $x < c$, then convert the bounded congruence into a Diophantine equation $x^2 + b' y = c'$ with $a' = 1$. The conversion exploits the fact that $x < c$ implies $x^2$ is bounded, so the residue $c' - x^2$ is always positive and divisible by $b'$ precisely when the congruence holds. +][ + _Construction._ Given a 3-CNF formula $phi$ with $n$ variables and $m$ clauses: + + *Step 1.* Apply the Manders--Adleman reduction (KSatisfiability $arrow.r$ QuadraticCongruences) to obtain $(a, b, c)$ such that $phi$ is satisfiable iff $exists x < c: x^2 equiv a mod b$. + + *Step 2.* Convert to a Diophantine equation. Let $h = c - 1$. Compute a padding value $p = floor((h^2 - a) \/ b) + 1$ and set $c' = a + b dot p$. Output the Diophantine equation $x^2 + b y = c'$ (i.e., $a' = 1$, $b' = b$). A positive integer $x$ with $x^2 + b y = c'$ must satisfy $y = (c' - x^2) \/ b > 0$, which requires $1 <= x <= h = c - 1$. + + _Correctness._ ($arrow.r.double$) If $phi$ is satisfiable, the congruence has a witness $x_0 < c$ with $x_0^2 equiv a mod b$. Then $x_0^2 - a = b k$ for some integer $k$ with $k <= floor((h^2 - a) \/ b) = p - 1$. Since $c' = a + b p$ and $x_0 <= c - 1 = h$, we have $c' - x_0^2 = b(p - k) > 0$, and $y = p - k$ is a positive integer. So $(x_0, y)$ is a solution to $x^2 + b y = c'$. ($arrow.l.double$) If $(x, y)$ satisfies $x^2 + b y = c'$ with $x, y >= 1$, then $x^2 = c' - b y equiv c' mod b equiv a mod b$ (since $c' = a + b p$). Also $x^2 < c' = a + b p <= h^2 + b$, and since $y >= 1$ we have $x^2 = c' - b y <= c' - b <= h^2 + b - b = h^2$, so $x <= h < c$. Thus $x$ is a valid congruence witness, and the original formula is satisfiable.
+ + _Solution extraction._ Decode the Diophantine witness $x$ from its little-endian binary encoding. Then extract a 3-SAT assignment by passing $x$ through the congruence-to-SAT extraction (sign recovery from divisibility by prime powers). +] + #let ksat_ss = load-example("KSatisfiability", "SubsetSum") #let ksat_ss_sol = ksat_ss.solutions.at(0) #reduction-rule("KSatisfiability", "SubsetSum", @@ -10712,6 +10833,57 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ $D = {v : x_v = 1}$. ] +#let mfdts_ilp = load-example("MinimumFaultDetectionTestSet", "ILP") +#let mfdts_ilp_sol = mfdts_ilp.solutions.at(0) +#reduction-rule("MinimumFaultDetectionTestSet", "ILP", + example: true, + example-caption: [DAG with #mfdts_ilp.source.instance.num_vertices vertices, #mfdts_ilp.source.instance.inputs.len() inputs, #mfdts_ilp.source.instance.outputs.len() outputs, and #(mfdts_ilp.source.instance.num_vertices - mfdts_ilp.source.instance.inputs.len() - mfdts_ilp.source.instance.outputs.len()) internal vertices], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(mfdts_ilp.source) + " -o mfdts.json", + "pred reduce mfdts.json --to " + target-spec(mfdts_ilp) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate mfdts.json --config " + mfdts_ilp_sol.source_config.map(str).join(","), + ) + + #{ + let source = mfdts_ilp.source.instance + let target = mfdts_ilp.target.instance + let source-config = mfdts_ilp_sol.source_config + let target-config = mfdts_ilp_sol.target_config + [ + *Step 1 -- Source instance.* The canonical DAG has $#source.num_vertices$ vertices and arcs #source.arcs.map(((u, v)) => [$(#u, #v)$]).join(", "). Inputs are ${#source.inputs.map(str).join(", ")}$, outputs are ${#source.outputs.map(str).join(", ")}$, so the internal vertices are ${2, 3, 4}$. The source configuration therefore has $#source.inputs.len() dot #source.outputs.len() = #target.num_vars$ input-output pair bits. 
+ + *Step 2 -- Build the covering ILP.* Order the pair variables as $(#source.inputs.at(0), #source.outputs.at(0))$, $(#source.inputs.at(0), #source.outputs.at(1))$, $(#source.inputs.at(1), #source.outputs.at(0))$, and $(#source.inputs.at(1), #source.outputs.at(1))$. Their internal coverage sets are ${2, 3}$, ${3}$, ${3}$, and ${3, 4}$, so the target has #target.num_vars binary variables, #target.constraints.len() covering constraints, and objective $min (x_0 + x_1 + x_2 + x_3)$. The exported constraints are exactly $x_0 >= 1$, $x_0 + x_1 + x_2 + x_3 >= 1$, and $x_3 >= 1$. + + *Step 3 -- Verify the canonical witness.* The stored ILP witness is $(#target-config.map(str).join(", "))$. Because extraction is identity, the source witness is the same vector $(#source-config.map(str).join(", "))$, which selects pairs $(#source.inputs.at(0), #source.outputs.at(0))$ and $(#source.inputs.at(1), #source.outputs.at(1))$. These two pairs cover internal vertices ${2, 3}$ and ${3, 4}$ respectively, so their union covers every internal vertex and the optimum value is $2$ #sym.checkmark. + + *Multiplicity:* The fixture stores one canonical witness. Any feasible solution must include $x_0 = 1$ to cover internal vertex 2 and $x_3 = 1$ to cover internal vertex 4, so the unique optimum is $(1, 0, 0, 1)$. + ] + } + ], +)[ + This direct $O((|I| + |O|)(|V| + |A|) + |I| |O| |V|)$ ILP encoding#footnote[Standard set-cover ILP formulation over input-output pair coverage sets; no dedicated bibliography key is currently registered in `references.bib`.] precomputes the coverage set of each input-output pair, then introduces one binary selection variable per pair and one covering inequality per internal vertex ($|I| |O|$ variables and $|V backslash (I union O)|$ constraints). +][ + _Construction._ Let the source DAG be $G = (V, A)$ with input set $I = {i_0, dots, i_(a-1)}$ and output set $O = {o_0, dots, o_(b-1)}$. 
For each ordered pair $(i_r, o_s) in I times O$, define its coverage set + $ + C_(r,s) = {v in V : v " is reachable from " i_r " and can reach " o_s}. + $ + Let $W = V backslash (I union O)$ be the internal vertices. Use `ILP` with binary variables $t_(r,s)$ indexed by the source pair order $p = r b + s$, so the target has $a b$ variables. For every internal vertex $v in W$, add the covering constraint + $ + sum_((r,s) : v in C_(r,s)) t_(r,s) >= 1. + $ + The objective is + $ + min sum_(r = 0)^(a - 1) sum_(s = 0)^(b - 1) t_(r,s), + $ + so the ILP minimizes the number of selected input-output pairs. + + _Correctness._ ($arrow.r.double$) If $T subset.eq I times O$ is a feasible source witness, set $t_(r,s) = 1$ exactly for pairs in $T$. Every internal vertex lies in the coverage set of at least one chosen pair, so each covering inequality holds, and the ILP objective equals $|T|$. ($arrow.l.double$) If the ILP is feasible, let $T = {(i_r, o_s) : t_(r,s) = 1}$. For every internal vertex $v in W$, the corresponding inequality ensures that some selected pair satisfies $v in C_(r,s)$, so the union of the chosen coverage sets covers all internal vertices. The source value is therefore exactly the ILP objective. + + _Solution extraction._ The target variables already form the source binary selection vector in the same pair order, so return them unchanged. +] + #reduction-rule("MinimumFeedbackVertexSet", "ILP")[ A directed graph is a DAG iff it admits a topological ordering. MTZ-style ordering variables enforce this: for each kept vertex, an integer position variable must increase strictly along every arc. Removed vertices relax the ordering constraints via big-$M$ terms. ][ @@ -10735,6 +10907,36 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ $S = {v : x_v = 1}$. 
] +#let fvs_cg = load-example("MinimumFeedbackVertexSet", "MinimumCodeGenerationUnlimitedRegisters") +#let fvs_cg_sol = fvs_cg.solutions.at(0) +#let fvs_cg_fvs = fvs_cg_sol.source_config.enumerate().filter(((i, x)) => x == 1).map(((i, x)) => i) +#reduction-rule("MinimumFeedbackVertexSet", "MinimumCodeGenerationUnlimitedRegisters", + example: true, + example-caption: [3-cycle digraph: FVS of size 1 maps to an expression DAG needing 1 LOAD], + extra: [ + #pred-commands( + "pred create --example MinimumFeedbackVertexSet -o fvs.json", + "pred reduce fvs.json --to " + target-spec(fvs_cg) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate fvs.json --config " + fvs_cg_sol.source_config.map(str).join(","), + ) + Source FVS: $F = {#fvs_cg_fvs.map(str).join(", ")}$ (size #fvs_cg_fvs.len()) on a digraph with $n = #fvs_cg.source.instance.graph.num_vertices$ vertices and $m = #fvs_cg.source.instance.graph.arcs.len()$ arcs \ + Target DAG: #fvs_cg.target.instance.num_vertices vertices, left arcs $L$: #{fvs_cg.target.instance.left_arcs.map(a => $#(a.at(0)) arrow.r #(a.at(1))$).join(", ")}, right arcs $R$: #{fvs_cg.target.instance.right_arcs.map(a => $#(a.at(0)) arrow.r #(a.at(1))$).join(", ")} \ + Target evaluation order: $(#fvs_cg_sol.target_config.map(str).join(", "))$ with #fvs_cg_sol.target_config.len() instructions #sym.checkmark + ], +)[ + The Aho--Johnson--Ullman chain gadget construction @ahoJohnsonUllman1977 encodes a feedback vertex set problem as a code generation problem on an expression DAG with unlimited registers and 2-address instructions. Each source vertex becomes a leaf (input register), and each outgoing arc becomes an internal chain node. The number of LOAD (copy) instructions needed in an optimal program equals the size of a minimum feedback vertex set. +][ + _Construction._ Given a directed graph $G = (V, A)$ with $n = |V|$ vertices and $m = |A|$ arcs, build an expression DAG $D$ with $n + m$ vertices as follows. 
Vertices $0, dots, n-1$ are _leaves_ (one per source vertex), each stored in its own register. For each source vertex $x$ with outgoing arcs $(x, y_1), dots, (x, y_d)$, create a chain of $d$ internal nodes $x^1, dots, x^d$ where: + - $x^1$ has left child $x^0$ (the leaf for $x$) and right child $y_1^0$ (the leaf for $y_1$), + - $x^i$ ($i >= 2$) has left child $x^(i-1)$ (previous chain node) and right child $y_i^0$ (the leaf for $y_i$). + The left operand's register is destroyed by the OP instruction; a LOAD (copy) is needed whenever a leaf register must survive past its destruction. + + _Correctness._ ($arrow.r.double$) If $F subset.eq V$ is a feedback vertex set of size $k$, then $G[V backslash F]$ is a DAG. The topological order of $G[V backslash F]$ induces an evaluation order of the chain nodes such that each leaf $x^0$ with $x in.not F$ is consumed (as a left child) only after all chain nodes that reference it as a right child have been evaluated. Only leaves corresponding to vertices in $F$ need a LOAD instruction (their register is destroyed before some right-child usage). Hence the program uses exactly $n + m - n + k = m + k$ instructions, of which $k$ are LOADs. ($arrow.l.double$) If an optimal program uses $k$ LOAD instructions, the $k$ leaves that require LOADs form a set $F$: removing $F$ from $G$ leaves a DAG (otherwise a directed cycle $v_1 -> dots -> v_l -> v_1$ would require each $v_i^0$ to be consumed before $v_(i+1)^0$, creating a circular register dependency that demands at least one additional LOAD for each cycle). Thus $F$ is a feedback vertex set of size $k$. + + _Solution extraction._ Given a target evaluation order (permutation of internal nodes), identify which leaves require a LOAD: leaf $x^0$ needs a LOAD iff the chain-start node $x^1$ is evaluated before some other internal node that uses $x^0$ as a right child. Set $c_x = 1$ for such vertices and $c_x = 0$ otherwise. 
+] + #reduction-rule("MaximumClique", "ILP")[ A clique requires every pair of selected vertices to be adjacent; equivalently, no two selected vertices may share a _non_-edge. This is the independent set formulation on the complement graph $overline(G)$. ][ @@ -13233,6 +13435,69 @@ The following reductions to Integer Linear Programming are straightforward formu _Solution extraction._ Output the first $m$ variables $(f_0, dots, f_(m-1))$ as the flow assignment. ] +#{ + let mfas_mlr = load-example("MinimumFeedbackArcSet", "MaximumLikelihoodRanking") + let mfas_mlr_sol = mfas_mlr.solutions.at(0) + let source-arcs = mfas_mlr.source.instance.graph.arcs + let target-matrix = mfas_mlr.target.instance.matrix + let ranking = mfas_mlr_sol.target_config + let removed-indices = mfas_mlr_sol.source_config.enumerate().filter(((i, x)) => x == 1).map(((i, _)) => i) + let removed-arcs = removed-indices.map(i => source-arcs.at(i)) + let target-cost = 0 + for a in range(target-matrix.len()) { + for b in range(target-matrix.len()) { + if a != b and ranking.at(a) > ranking.at(b) { + target-cost += target-matrix.at(a).at(b) + } + } + } + let fmt-mat(m) = m.map(row => row.map(v => str(v)).join(", ")).join("; ") + [ + #reduction-rule("MinimumFeedbackArcSet", "MaximumLikelihoodRanking", + example: true, + example-caption: [5-vertex digraph ($n = #mfas_mlr.source.instance.graph.num_vertices$, $|A| = #source-arcs.len()$, unit weights) mapped to a skew-symmetric ranking matrix], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(mfas_mlr.source) + " -o mfas.json", + "pred reduce mfas.json --to " + target-spec(mfas_mlr) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate mfas.json --config " + mfas_mlr_sol.source_config.map(str).join(","), + ) + + *Step 1 -- Source instance.* The source digraph has vertices ${#range(mfas_mlr.source.instance.graph.num_vertices).map(str).join(", ")}$ and arcs #{source-arcs.map(a => $(#(a.at(0)) arrow #(a.at(1)))$).join(", ")}, 
all with unit weight. The extracted optimal feedback arc set removes #{removed-arcs.map(a => $(#(a.at(0)) arrow #(a.at(1)))$).join(" and ")}, so $|F| = #removed-arcs.len()$. + + *Step 2 -- Build the comparison matrix.* The reduction keeps the same item set and writes $M_(i j) = 1$ when only $i arrow j$ exists, $M_(i j) = -1$ when only $j arrow i$ exists, and $M_(i j) = 0$ otherwise. For this instance, + $ M = mat(#fmt-mat(target-matrix)) $. + Every off-diagonal pair sums to $0$, so the target is a valid Maximum Likelihood Ranking instance with $c = 0$. + + *Step 3 -- Verify a solution.* The stored ranking vector is $(#ranking.map(str).join(", "))$, interpreted as the map from items to ranks. The target disagreement cost is $#target-cost = 2 dot #removed-arcs.len() - #source-arcs.len()$, and the extracted source witness is exactly the backward-arc set #{removed-arcs.map(a => $(#(a.at(0)) arrow #(a.at(1)))$).join(" and ")} #sym.checkmark + + *Multiplicity:* The fixture stores one canonical optimum. Other optimal rankings exist because the DAG obtained after removing the two backward arcs has multiple valid topological orders. + ], + )[ + This $O(n^2)$ reduction @garey1979 applies to unit-weight feedback arc set instances. It keeps the same vertex set as ranking items and encodes each unordered pair by a skew-symmetric entry in $\{-1, 0, 1\}$ with comparison count $c = 0$. + ][ + _Construction._ Given a unit-weight Minimum Feedback Arc Set instance $(G = (V, A), bold(1))$ with $V = \{0, dots, n - 1\}$, construct the matrix $M in ZZ^(n times n)$ by setting $M_(i i) = 0$ and, for every distinct pair $i, j$, + $ + M_(i j) = cases( + 1 & "if" (i arrow j) in A and (j arrow i) not in A, \ + -1 & "if" (j arrow i) in A and (i arrow j) not in A, \ + 0 & "otherwise" + ). + $ + Then $M_(i j) + M_(j i) = 0$ for all $i != j$, so the target is a valid Maximum Likelihood Ranking instance with $n$ items. 
+ + _Correctness._ ($arrow.r.double$) Let $pi$ be any ranking and let $B(pi) = \{(u arrow v) in A : pi(u) > pi(v)\}$ be its backward arcs. Removing $B(pi)$ leaves only forward arcs, hence a DAG, so $B(pi)$ is a feedback arc set. Partition unordered vertex pairs into one-directional pairs $A_1$ and bidirectional pairs $A_2$. Every one-directional backward arc contributes $+1$ to the MLR objective, every one-directional forward arc contributes $-1$, and bidirectional or absent pairs contribute $0$. Therefore + $ + "cost"(pi) = 2 |B(pi)| - (|A_1| + 2|A_2|) = 2 |B(pi)| - |A|. + $ + The target objective is thus the source objective shifted by the constant $-|A|$, so minimizing disagreement cost minimizes feedback arc set size. ($arrow.l.double$) Let $F subset.eq A$ be a minimum feedback arc set, and take a topological order $pi$ of the DAG $G - F$. Every arc in $A backslash F$ is forward in $pi$, hence every backward arc under $pi$ lies in $F$, so $B(pi) subset.eq F$. Since $B(pi)$ is itself a feedback arc set by the previous argument, minimality of $F$ forces $|B(pi)| = |F|$. Therefore an optimal source solution yields an optimal target ranking. + + _Solution extraction._ Given the target rank vector, output one source bit per source arc $(u arrow v)$ in source-arc order: set the bit to $1$ iff item $u$ is ranked after item $v$, and to $0$ otherwise. + ] + ] +} + #{ let mlr_ilp = load-example("MaximumLikelihoodRanking", "ILP") let mlr_ilp_sol = mlr_ilp.solutions.at(0) @@ -13869,6 +14134,37 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ For each variable $x_i$, inspect the truth-setting pair. Set $x_i = 1$ when the cover contains $u_i$, and set $x_i = 0$ otherwise. 
] +#let dmvc_hc = load-example("DecisionMinimumVertexCover", "HamiltonianCircuit") +#let dmvc_hc_sol = dmvc_hc.solutions.at(0) +#reduction-rule("DecisionMinimumVertexCover", "HamiltonianCircuit", + example: true, + example-caption: [3-vertex path with bound $k = #dmvc_hc.source.instance.bound$: one selector threads two cover-testing gadgets], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(dmvc_hc.source) + " -o dmvc.json", + "pred reduce dmvc.json --to " + target-spec(dmvc_hc) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate dmvc.json --config " + dmvc_hc_sol.source_config.map(str).join(","), + ) + + *Step 1 -- Source instance.* The canonical Decision Minimum Vertex Cover fixture has inner graph $G$ on vertices ${0, 1, 2}$ with edges #{dmvc_hc.source.instance.inner.graph.edges.map(e => $(#e.at(0), #e.at(1))$).join(", ")} and unit weights. The bound is $k = #dmvc_hc.source.instance.bound$, and the stored cover witness is $(#dmvc_hc_sol.source_config.map(str).join(", "))$, i.e.\ $C = {1}$. + + *Step 2 -- Build the Hamiltonian graph.* There is one selector vertex $a_1$ and one 12-vertex gadget for each source edge, so the target graph has $1 + 2 dot 12 = #graph-num-vertices(dmvc_hc.target.instance)$ vertices. The path for source vertex $1$ chains the two gadgets through the connector from $(1, e_0, 6)$ to $(1, e_1, 1)$. The completed target has #graph-num-edges(dmvc_hc.target.instance) edges. + + *Step 3 -- Verify a witness.* The stored Hamiltonian circuit is $(#dmvc_hc_sol.target_config.map(str).join(", "))$. Reading the cycle between selector contacts shows that the unique selector traverses first the gadget for edge $(0,1)$ in the "only vertex 1 chosen" mode and then the gadget for edge $(1,2)$ in the same mode, visiting every one of the 25 target vertices exactly once. 
Extracting the selector-adjacent path endpoints returns the source cover $(#dmvc_hc_sol.source_config.map(str).join(", ")) = {1}$, which indeed covers both source edges #sym.checkmark + + *Multiplicity:* The fixture stores one canonical Hamiltonian circuit. Rotating or reversing that same cycle yields equivalent target witnesses with the same extracted cover. + ], +)[ + Garey and Johnson's Theorem 3.4 replaces each source edge by a 12-vertex cover-testing gadget and uses $k$ selector vertices to choose $k$ source vertices whose incident gadget-paths together cover every gadget @garey1979. In the unit-weight decision setting, the constructed graph is Hamiltonian iff the source graph has a vertex cover of size at most $k$. +][ + _Construction._ Let the source be a unit-weight Decision Minimum Vertex Cover instance $(G = (V, E), k)$ with $G$ simple. For each edge $e = {u, v} in E$, create a gadget with vertices $(u, e, i)$ and $(v, e, i)$ for $1 <= i <= 6$. Add the two 6-chains on the $u$-side and $v$-side together with the four cross edges ${(u, e, 3), (v, e, 1)}$, ${(v, e, 3), (u, e, 1)}$, ${(u, e, 6), (v, e, 4)}$, and ${(v, e, 6), (u, e, 4)}$. For every source vertex $v$, order its incident edges as $e_(v[1]), dots, e_(v[deg(v)])$ and connect ${(v, e_(v[i]), 6), (v, e_(v[i+1]), 1)}$ for $1 <= i < deg(v)$, forming one path that contains exactly the gadget copies labeled by $v$. Finally add selector vertices $a_1, dots, a_k$ and join each selector to both endpoints of every non-isolated vertex-path. Thus the theorem branch has $k + 12|E|$ vertices and $14|E| + sum_(v in V^+) (deg(v)-1) + 2k|V^+|$ edges, where $V^+ = {v in V : deg(v) > 0}$. + + _Correctness._ ($arrow.r.double$) Suppose $C subset.eq V$ is a vertex cover with $|C| <= k$. Because all weights are 1, we may pad $C$ with arbitrary additional non-isolated vertices until it has exactly $k$ elements, say $v_1, dots, v_k$. 
For every edge gadget $e = {u, v}$, traverse it in one of the three gadget modes from @garey1979: if only $u in C$, follow the unique Hamiltonian path from $(u, e, 1)$ to $(u, e, 6)$ through all 12 gadget vertices; if only $v in C$, use the symmetric path from $(v, e, 1)$ to $(v, e, 6)$ through all 12 vertices; if both endpoints lie in $C$, use the two disjoint side paths from $(u, e, 1)$ to $(u, e, 6)$ and from $(v, e, 1)$ to $(v, e, 6)$. Chaining these gadget traversals along the paths for $v_1, dots, v_k$ and connecting consecutive paths through the selectors yields a Hamiltonian circuit of the target graph. ($arrow.l.double$) Suppose the target graph has a Hamiltonian circuit. Each selector has degree two inside the circuit and therefore cuts the circuit into $k$ selector-to-selector segments. Inside any edge gadget, the circuit can appear only in the three modes above, so each segment must stay on the path corresponding to one source vertex. Mark a source vertex $v$ selected exactly when both endpoints of its path are adjacent to selectors in the Hamiltonian circuit. This selects exactly $k$ source vertices. Every edge gadget must be completely visited, and that is possible only if at least one of its endpoint paths is selected, so every source edge has a selected endpoint. Hence the extracted set is a vertex cover of size at most $k$. + + _Solution extraction._ Given a Hamiltonian circuit witness, inspect the two endpoints of each source vertex-path. Set $x_v = 1$ iff both path endpoints are adjacent to selector vertices in the cycle; otherwise set $x_v = 0$. The resulting indicator vector is a valid source-side vertex cover. 
+] + #let ksat_mvc = load-example("KSatisfiability", "MinimumVertexCover") #let ksat_mvc_sol = ksat_mvc.solutions.at(0) #reduction-rule("KSatisfiability", "MinimumVertexCover", @@ -14038,6 +14334,39 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ For each variable $x_k$, set $tau(x_k) = 1$ iff $s_k^+$ appears before $s_k^-$ in the realization. ] +#let ksat_rs = load-example("KSatisfiability", "RegisterSufficiency") +#let ksat_rs_sol = ksat_rs.solutions.at(0) +#reduction-rule("KSatisfiability", "RegisterSufficiency", + example: true, + example-caption: [Two-clause 3-SAT instance ($n = #ksat_rs.source.instance.num_vars$, $m = #sat-num-clauses(ksat_rs.source.instance)$) reduced to Register Sufficiency], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(ksat_rs.source) + " -o ksat.json", + "pred reduce ksat.json --to " + target-spec(ksat_rs) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate ksat.json --config " + ksat_rs_sol.source_config.map(str).join(","), + ) + + *Step 1 -- Source instance.* The canonical source formula is $phi = (x_1 or overline(x)_2 or x_3) and (overline(x)_1 or x_2 or overline(x)_3)$. The stored satisfying assignment is $(#ksat_rs_sol.source_config.map(str).join(", "))$. + + *Step 2 -- Build Sethi's DAG.* Here $n = #ksat_rs.source.instance.num_vars$, $m = #sat-num-clauses(ksat_rs.source.instance)$, and $b = max(0, 2n - m) = 4$. The construction creates $|A| = 2n + 1 = 7$, $|B| = b = 4$, $|C| = m = 2$, $|F| = 3m = 6$, $|M| = 3$, $|R| = n(n+1) = 12$, $|S| = |T| = n^2 = 9$, $|U| = 2n = 6$, $|W| = |Z| = n = 3$, and $|X| = 2n = 6$, for a total of $|V| = #ksat_rs.target.instance.num_vertices = 70$ vertices. The target bound is $K = #ksat_rs.target.instance.bound = 23$, and the construction emits $|A| = #ksat_rs.target.instance.arcs.len() = 152$ arcs. 
For clause $C_1$, the six literal-lock arcs are $(x_1^+, f_(1,1))$, $(x_2^-, f_(1,2))$, $(x_3^+, f_(1,3))$, $(x_1^-, f_(1,2))$, $(x_1^-, f_(1,3))$, and $(x_2^+, f_(1,3))$. + + *Step 3 -- Verify extraction at $w_n$.* The target witness is a computation ordering on $70$ vertices. Reading the prefix ending at $w_3$ marks exactly those variables whose $x_k^+$ node has already been computed, which reconstructs $(#ksat_rs_sol.source_config.map(str).join(", "))$ #sym.checkmark. Evaluating the original 3-SAT formula under that assignment returns true #sym.checkmark + + *Multiplicity:* The fixture stores one canonical satisfying assignment. Different satisfying assignments can induce different valid computation orders in the target DAG. + ], +)[ + Sethi's Reduction I / Theorem 3.11 @sethi1975 @garey1979[PO1] builds a dependency DAG whose register pressure mirrors a literal-assignment phase followed by a clause-verification phase. For a 3-CNF formula with $n$ variables, $m$ clauses, and $b = max(0, 2n - m)$, the target has $3n^2 + 9n + 4m + b + 4$ vertices, $6n^2 + 19n + 16m + 2b + 1$ arcs, and register bound $K = 3m + 4n + 1 + b$. +][ + _Construction._ Let $phi = and_(i=1)^m C_i$ be a 3-CNF formula over variables $x_1, dots, x_n$, where $C_i = (Y_(i,1) or Y_(i,2) or Y_(i,3))$. Define $b = max(0, 2n - m)$. Create node families $A = {a_j : 1 <= j <= 2n+1}$, $B = {b_j : 1 <= j <= b}$, $C = {c_i : 1 <= i <= m}$, $F = {f_(i,j) : 1 <= i <= m, 1 <= j <= 3}$, $M = {"initial", d, "final"}$, $R = {r_(k,j) : 1 <= k <= n, 1 <= j <= 2n-2k+2}$, $S = {s_(k,j) : 1 <= k <= n, 1 <= j <= 2n-2k+1}$, $T = {t_(k,j) : 1 <= k <= n, 1 <= j <= 2n-2k+1}$, $U = {u_(k,1), u_(k,2) : 1 <= k <= n}$, $W = {w_k : 1 <= k <= n}$, $X = {x_k^+, x_k^- : 1 <= k <= n}$, and $Z = {z_k : 1 <= k <= n}$. 
Add the ten arc families from Sethi's theorem as follows: $"initial"$ depends on every node in $A union B union F union U$, every node in $C union R union S union T union W$ depends on $"initial"$, $"final"$ depends on $W union X union Z union {"initial", d}$, each variable gadget links $x_k^+$ and $x_k^-$ to $z_k$, $u_(k,1)$, $u_(k,2)$, $s_(k,*)$, $t_(k,*)$, $r_(k,*)$, and each clause gadget links $c_i$ to $w_n$, $z_n$, its three $f_(i,j)$ nodes, and the literal-lock edges determined by whether $Y_(i,j)$ is positive or negative. + + _Correctness._ ($arrow.r.double$) Suppose $phi$ is satisfiable under assignment $tau$. Execute Sethi's eight-stage schedule. In the variable phase, the chain gadgets force a choice between the positive and negative side of each variable, and the schedule can be arranged so that by the moment $w_n$ is computed, $x_k^+$ has appeared iff $tau(x_k) = 1$. Because every clause has a satisfied literal, the corresponding $f_(i,j)$ node is unlocked during the clause phase, and the lock edges from the opposite literals prevent incompatible clause traversals. Sethi proves that this entire computation uses at most $K$ registers, so the target Register Sufficiency instance is feasible. + + ($arrow.l.double$) Suppose the target instance has a computation using at most $K$ registers. Stop immediately after $w_n$ is computed. At that snapshot, for each variable gadget, at most one of $x_k^+$ and $x_k^-$ has been computed. Define $tau(x_k) = 1$ iff $x_k^+$ has already been computed. Now assume some clause $C_i$ is false under $tau$. Then every literal $Y_(i,j)$ is false, so the corresponding literal node has not yet been computed by the $w_n$ snapshot. Consequently each $f_(i,j)$ still has an uncomputed literal predecessor. Sethi's clause-phase invariant leaves no free register between the computation of $w_n$ and the later computation of $d$, so such a clause node $c_i$ could not be discharged without exceeding $K$, contradiction. 
Therefore every clause contains a true literal under $tau$, and $phi$ is satisfiable. + + _Solution extraction._ Given a target computation ordering, let $t(w_n)$ be the position of $w_n$. Output $x_k = 1$ exactly when $t(x_k^+) < t(w_n)$. This is the corrected extraction rule: the snapshot is taken at $w_n$, not $z_n$, and the sign test uses $x_k^+$, not $x_k^-$. +] + #reduction-rule("FeasibleRegisterAssignment", "ILP", example: false, )[ @@ -14050,6 +14379,18 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ Read vertex positions from the permutation matrix. ] +#reduction-rule("RegisterSufficiency", "ILP", + example: false, +)[ + Direct ILP formulation of Register Sufficiency: integer evaluation times, latest-use times, binary pair-order selectors, and per-step live-value indicators. For a DAG with $n$ vertices, $m$ arcs, and $s$ sinks, the ILP has $(7n^2 + 3n)/2$ variables and $(21n^2 + 3n)/2 + 2m + s$ constraints. +][ + _Construction._ Let the source DAG use the repository convention that an arc $(v, u)$ means vertex $v$ depends on vertex $u$. Introduce integer variables $t_v in {0, dots, n-1}$ for evaluation positions and $l_v in {0, dots, n}$ for latest-use positions. For every unordered vertex pair ${u, v}$, add a binary selector $b_(u,v)$ with big-$M$ constraints forcing either $t_u < t_v$ or $t_v < t_u$; since all $t_v$ lie in the interval ${0, dots, n-1}$, the positions form a permutation. For every dependency arc $(v, u)$, enforce $t_v >= t_u + 1$ and $l_u >= t_v$. For every sink vertex (no dependents), set $l_u = n$. For each vertex-step pair $(u, s)$ with $s in {0, dots, n-1}$, add binary threshold variables $p_(u,s)$ and $q_(u,s)$ satisfying $p_(u,s) = 1$ iff $t_u <= s$ and $q_(u,s) = 1$ iff $l_u > s$, plus a binary live indicator $h_(u,s) = p_(u,s) and q_(u,s)$. Finally impose $sum_u h_(u,s) <= K$ for every step $s$. 
+ + _Correctness._ ($arrow.r.double$) Any valid computation ordering of the source DAG yields a feasible ILP solution: assign each $t_v$ to the vertex position in the ordering, each $l_v$ to the latest dependent position (or $n$ for sinks), and derive the binary threshold/live variables from those integers. The dependency constraints hold by topological validity, and the live-count inequalities hold because the source witness uses at most $K$ registers. ($arrow.l.double$) Any feasible ILP solution gives distinct positions $t_v$, hence a permutation of the vertices, and the arc constraints make that permutation topological. The live indicators $h_(u,s)$ certify exactly which values remain live after step $s$, so the step constraints prove that no more than $K$ values are simultaneously live. Therefore the extracted ordering is a valid Register Sufficiency witness. + + _Solution extraction._ Return the first $n$ ILP coordinates $(t_0, dots, t_(n-1))$ as the vertex evaluation positions. +] + #let part_swi = load-example("Partition", "SequencingWithinIntervals") #let part_swi_sol = part_swi.solutions.at(0) #reduction-rule("Partition", "SequencingWithinIntervals", @@ -14690,6 +15031,87 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ The selection vector is unchanged. 
] +#let x3c_mfdts = load-example("ExactCoverBy3Sets", "MinimumFaultDetectionTestSet") +#let x3c_mfdts_sol = x3c_mfdts.solutions.at(0) +#reduction-rule("ExactCoverBy3Sets", "MinimumFaultDetectionTestSet", + example: true, + example-caption: [#x3c_mfdts.source.instance.subsets.len() triples over $3q = #x3c_mfdts.source.instance.universe_size$ elements, with one shared output], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(x3c_mfdts.source) + " -o x3c.json", + "pred reduce x3c.json --to " + target-spec(x3c_mfdts) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate x3c.json --config " + x3c_mfdts_sol.source_config.map(str).join(","), + ) + + #let q = x3c_mfdts.source.instance.universe_size / 3 + *Step 1 -- Source instance.* The X3C fixture has universe $U = {0, dots, #(x3c_mfdts.source.instance.universe_size - 1)}$ with $q = #q$ and triples + #for (i, s) in x3c_mfdts.source.instance.subsets.enumerate() [ + $C_#i = {#s.map(str).join(", ")}$#if i < x3c_mfdts.source.instance.subsets.len() - 1 [, ] else [.] + ] + + *Step 2 -- Build the fault-detection DAG.* Create one input vertex for each triple, one internal vertex for each universe element, and one shared output. The target therefore has $#x3c_mfdts.target.instance.num_vertices$ vertices, $#x3c_mfdts.target.instance.arcs.len()$ arcs, inputs ${#x3c_mfdts.target.instance.inputs.map(str).join(", ")}$, and output ${#x3c_mfdts.target.instance.outputs.map(str).join(", ")}$. Input $i_j$ connects to exactly the three internal vertices for elements in $C_j$, and every internal vertex connects to the shared output. + + *Step 3 -- Verify the canonical witness.* The stored source configuration $(#x3c_mfdts_sol.source_config.map(str).join(", "))$ selects $C_0 = {#x3c_mfdts.source.instance.subsets.at(0).map(str).join(", ")}$ and $C_1 = {#x3c_mfdts.source.instance.subsets.at(1).map(str).join(", ")}$, which are disjoint and cover all six universe elements. 
The target configuration is identical: $(#x3c_mfdts_sol.target_config.map(str).join(", "))$. Pair $(0, #(x3c_mfdts.target.instance.outputs.at(0)))$ covers internal vertices ${0, 1, 2}$, pair $(1, #(x3c_mfdts.target.instance.outputs.at(0)))$ covers ${3, 4, 5}$, and together they cover every internal vertex with value $#q$ #sym.checkmark. + + *Multiplicity:* The fixture stores one canonical witness. Any target witness of value $q$ selects exactly $q$ inputs, and since each selected pair covers only 3 internal vertices while there are $3q$ internal vertices overall, those $q$ neighborhoods must be pairwise disjoint and form an exact cover. + ], +)[ + This $O(m + q)$ reduction adapts the classical X3C gadget behind Fault Detection in Directed Graphs @garey1979[MS18] to the repository's internal-vertex coverage semantics. It creates one input per source triple, one internal vertex per universe element, and one shared output, so the target has $m + 3q + 1$ vertices and $3m + 3q$ arcs. The X3C instance is a YES-instance if and only if the Minimum Fault Detection Test Set optimum is at most $q = |U| / 3$. +][ + _Construction._ Let the X3C instance be $(U, cal(C))$ with $|U| = 3q$ and $cal(C) = {C_0, dots, C_(m-1)}$, where each $C_j subset.eq U$ has size $3$. Create input vertices $i_0, dots, i_(m-1)$, internal vertices $e_u$ for every $u in U$, and one shared output vertex $o$. Add arcs $(i_j, e_u)$ exactly when $u in C_j$, and add $(e_u, o)$ for every $u in U$. The implemented target counts only internal vertices, so the coverage requirement applies precisely to the $3q$ vertices $e_u$. + + _Correctness._ ($arrow.r.double$) If $cal(C)' subset.eq cal(C)$ is an exact cover, select the corresponding input-output pairs $(i_j, o)$. Each chosen pair covers exactly the three internal vertices $e_u$ with $u in C_j$, and the $q$ chosen triples partition $U$, so all $3q$ internal vertices are covered using $q$ pairs. 
($arrow.l.double$) Suppose the target admits a witness of value at most $q$. Every selected pair covers at most three internal vertices, while there are $3q$ internal vertices to cover, so an optimal witness of value at most $q$ must in fact have value exactly $q$ and each selected pair must cover three previously uncovered internal vertices. Hence the corresponding source triples are pairwise disjoint and together cover all of $U$, yielding an exact cover. + + _Solution extraction._ The target configuration has one coordinate per source triple (there is only one output), so the extraction map is the identity. +] + +#let x3c_mas = load-example("ExactCoverBy3Sets", "MinimumAxiomSet") +#let x3c_mas_sol = x3c_mas.solutions.at(0) +#reduction-rule("ExactCoverBy3Sets", "MinimumAxiomSet", + example: true, + example-caption: [#x3c_mas.source.instance.subsets.len() triples over $3q = #x3c_mas.source.instance.universe_size$ elements, with decision bound $q = #(x3c_mas.source.instance.universe_size / 3)$], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(x3c_mas.source) + " -o x3c.json", + "pred reduce x3c.json --to " + target-spec(x3c_mas) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate x3c.json --config " + x3c_mas_sol.source_config.map(str).join(","), + ) + + #let x3c_mas_q = x3c_mas.source.instance.universe_size / 3 + *Step 1 -- Source instance.* The X3C fixture has universe $U = {0, dots, #(x3c_mas.source.instance.universe_size - 1)}$ with $q = #x3c_mas_q$ and candidate triples + #for (i, s) in x3c_mas.source.instance.subsets.enumerate() [ + $C_#i = {#s.map(str).join(", ")}$#if i < x3c_mas.source.instance.subsets.len() - 1 [, ] else [.] + ] + + *Step 2 -- Build the axiom system.* Create one element-sentence for each universe element and one set-sentence for each triple, so the target has $#x3c_mas.target.instance.num_sentences$ sentences and $#x3c_mas.target.instance.true_sentences.len()$ true sentences. 
Each triple contributes three forward implications and one backward implication, giving $#x3c_mas.target.instance.implications.len()$ implications total. The optimization instance itself stores only the axiom-system data; the X3C bound $q = #x3c_mas_q$ is checked externally against the optimum target value. + + *Step 3 -- Verify the canonical witness.* The stored source config $(#x3c_mas_sol.source_config.map(str).join(", "))$ selects $C_3 = {#x3c_mas.source.instance.subsets.at(3).map(str).join(", ")}$ and $C_4 = {#x3c_mas.source.instance.subsets.at(4).map(str).join(", ")}$. These two triples are disjoint and cover all six universe elements #sym.checkmark. The target axiom vector $(#x3c_mas_sol.target_config.map(str).join(", "))$ selects exactly the set-sentence coordinates $#(x3c_mas.source.instance.universe_size + 3)$ and $#(x3c_mas.source.instance.universe_size + 4)$, namely $z_3$ and $z_4$. One closure round derives every element-sentence $e_0, dots, e_5$; then the backward rules derive the remaining set-sentences $z_0, z_1, z_2$, so the closure equals all $#x3c_mas.target.instance.true_sentences.len()$ true sentences. This witness therefore attains value $#x3c_mas_q$ #sym.checkmark, and extracting the chosen set-sentence coordinates recovers the exact cover. + + *Multiplicity:* The fixture stores one canonical witness. Any target witness of value $q$ must select exactly $q$ set-sentences and no element-sentences, because each direct element axiom lowers the maximum possible element coverage by two. + ], +)[ + This $O(m)$ reduction @garey1979[LO17] encodes each source triple as a set-sentence with three forward implications to its elements and one backward implication from those three element-sentences back to the set-sentence. The target has $3q + m$ sentences and $4m$ implications, and the X3C instance is a YES-instance if and only if the Minimum Axiom Set optimum is at most $q = |U| / 3$. 
+][ + _Construction._ Let the X3C instance be $(U, cal(C))$ with $|U| = 3q$ and $cal(C) = {C_0, dots, C_(m-1)}$, where each $C_j = {a_j, b_j, c_j}$ has size $3$. Create one element-sentence $e_u$ for each $u in U$ and one set-sentence $z_j$ for each $C_j$, and let $T = S$ be the full sentence set. For every triple $C_j = {a_j, b_j, c_j}$ add the four implications + $ + ({z_j}, e_(a_j)), quad ({z_j}, e_(b_j)), quad ({z_j}, e_(c_j)), quad ({e_(a_j), e_(b_j), e_(c_j)}, z_j). + $ + + _Variable mapping._ Source coordinate $j$ corresponds to set-sentence $z_j$. The target configuration lists the $3q$ element-sentence coordinates first and the $m$ set-sentence coordinates second. + + _Correctness._ ($arrow.r.double$) If $cal(C)' subset.eq cal(C)$ is an exact cover, then $|cal(C)'| = q$. Choose the corresponding set-sentences as the only axioms. Their forward implications derive all $3q$ element-sentences in one round because the cover spans $U$. Once every element-sentence is true, every backward implication fires, so every set-sentence becomes true and the full closure equals $T$. Hence the target optimum is at most $q$. + + ($arrow.l.double$) Suppose some axiom set $A$ of size at most $q$ derives all of $T$. Write $A = E union Z$, where $E$ contains the chosen element-sentences and $Z$ the chosen set-sentences. Backward implications never create a new element-sentence: if a backward rule derives $z_j$, its antecedent already contains the three element-sentences of $C_j$, and the forward rules from $z_j$ only repeat those same elements. Therefore the closure's element-sentences are exactly the chosen element axioms together with the elements that lie in triples indexed by $Z$. Since the closure contains all $3q$ element-sentences, + $ + 3q <= |E| + 3 |Z| <= |E| + 3(q - |E|) = 3q - 2 |E|. + $ + Thus $|E| = 0$ and then $|Z| = q$. 
Equality also forces the chosen triples to be pairwise disjoint and to cover all $3q$ elements, so the corresponding source subsets form an exact cover. + + _Solution extraction._ Given a target axiom vector, keep only the coordinates of the set-sentences $z_0, dots, z_(m-1)$. On every optimal target witness of value $q$, these coordinates select exactly $q$ disjoint triples covering $U$, so they are an X3C witness. +] + // ── Batch of 18 reduction rules from derivation document ── // 1. SubsetSum → Partition (#973) @@ -14857,7 +15279,102 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ Given a partition $V_0, dots, V_(k-1)$ into cliques, assign color $i$ to every vertex in $V_i$. ] -// 4. KSatisfiability → Kernel (#882) +// 4. KColoring → Clustering (#924) +#reduction-rule("KColoring", "Clustering")[ + This $O(n^2)$ reduction @garey1979[MS9] @brucker1978clustering keeps the vertex set as the ground set, assigns distance 1 to adjacent pairs and distance 0 to nonadjacent pairs, and fixes $K = 3$ and $B = 0$. A feasible clustering is therefore exactly a partition of the graph into at most three independent sets. +][ + _Construction._ Given a 3-Coloring instance $(G = (V, E), 3)$, build a Clustering instance on the same ground set $X = V$. Set $d(u, v) = 1$ if ${u, v} in E$, set $d(u, v) = 0$ if ${u, v} in.not E$, and set $d(u, u) = 0$ for every vertex $u$. Set the cluster bound to $K = 3$ and the diameter bound to $B = 0$. + + _Correctness._ ($arrow.r.double$) A proper 3-coloring partitions $V$ into at most three color classes, and each color class is an independent set. Because independent sets contain no adjacent pair, every two vertices inside one color class have distance 0, so these classes form a feasible clustering. ($arrow.l.double$) Conversely, a feasible clustering with $B = 0$ puts only distance-0 pairs in the same cluster. 
By construction distance 0 means nonadjacent, so every cluster is an independent set. Assigning one color per cluster yields a proper 3-coloring. + + _Solution extraction._ Read the cluster label of each source vertex as its color label. +] + +#let clustering_ilp = load-example("Clustering", "ILP") +#let clustering_ilp_sol = clustering_ilp.solutions.at(0) +#reduction-rule("Clustering", "ILP", + example: true, + example-caption: [4 elements, $K = 2$, $B = 1$ $arrow.r$ ILP with #clustering_ilp.target.instance.num_vars variables and #clustering_ilp.target.instance.constraints.len() constraints], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(clustering_ilp.source) + " -o clustering.json", + "pred reduce clustering.json --to " + target-spec(clustering_ilp) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate clustering.json --config " + clustering_ilp_sol.source_config.map(str).join(","), + ) + + #{ + let n = clustering_ilp.source.instance.distances.len() + let k = clustering_ilp.source.instance.num_clusters + let b = clustering_ilp.source.instance.diameter_bound + let distances = clustering_ilp.source.instance.distances + let source-config = clustering_ilp_sol.source_config + [ + *Step 1 -- Source instance.* The canonical source has $n = #n$ elements, cluster bound $K = #k$, and diameter bound $B = #b$. Its distance rows are #distances.map(row => "(" + row.map(str).join(", ") + ")").join("; "), so the only pairs above the bound are $(0, 2)$, $(0, 3)$, $(1, 2)$, and $(1, 3)$. + + *Step 2 -- Build the ILP.* Introduce one binary variable $x_(i,c)$ for each element-cluster pair, giving $n K = #(n * k)$ variables. The $n = #n$ assignment equalities $sum_c x_(i,c) = 1$ force every element into exactly one cluster, and the four violating pairs contribute $4 dot K = #(4 * k)$ conflict inequalities. 
The stored target therefore has #clustering_ilp.target.instance.num_vars variables and #clustering_ilp.target.instance.constraints.len() constraints. + + *Step 3 -- Verify the canonical witness.* The stored ILP vector is $(#clustering_ilp_sol.target_config.map(str).join(", "))$. Reading each block of $K = #k$ variables yields the clustering $(#source-config.map(str).join(", "))$, so cluster 0 contains elements ${#source-config.enumerate().filter(((i, c)) => c == 0).map(((i, c)) => str(i)).join(", ")}$ and cluster 1 contains elements ${#source-config.enumerate().filter(((i, c)) => c == 1).map(((i, c)) => str(i)).join(", ")}$. The only within-cluster distances are $d(0,1) = #distances.at(0).at(1)$ and $d(2,3) = #distances.at(2).at(3)$, both at most $B$ #sym.checkmark. + + *Multiplicity:* The fixture stores one canonical witness. Swapping the two cluster labels gives an equivalent second witness because the ILP distinguishes clusters only by index. + ] + } + ], +)[ + This direct $O(n^2 K)$ ILP encoding#footnote[Standard ILP formulation of diameter-bounded clustering; no specific literature key is currently registered in `references.bib`.] introduces one binary variable $x_(i,c)$ for each element $i$ and cluster $c$, forces every element into exactly one cluster, and forbids every pair with $d(i,j) > B$ from sharing a cluster ($n K$ variables and at most $n + n^2 K$ constraints). +][ + _Construction._ Given a Clustering instance on elements $X = {0, dots, n - 1}$ with cluster bound $K$ and diameter bound $B$, create binary variables $x_(i,c) in {0, 1}$ for every element $i in X$ and cluster $c in {0, dots, K - 1}$. Interpret $x_(i,c) = 1$ iff element $i$ is assigned to cluster $c$. + + For every element $i$, add the assignment constraint $sum_(c=0)^(K-1) x_(i,c) = 1$. For every pair $i < j$ with $d(i,j) > B$ and every cluster $c$, add the conflict constraint $x_(i,c) + x_(j,c) <= 1$. Use the zero objective and minimize it, so the target is a pure feasibility ILP. 
+ + _Correctness._ ($arrow.r.double$) If the source instance has a feasible clustering, set $x_(i,c) = 1$ exactly for the cluster assigned to element $i$. Every element is assigned once, so all assignment equalities hold. Whenever $d(i,j) > B$, the source clustering never puts $i$ and $j$ together, so for every cluster $c$ at least one of $x_(i,c)$ or $x_(j,c)$ is 0 and every conflict inequality holds. ($arrow.l.double$) If the ILP is feasible, the assignment equalities choose exactly one cluster for each element. If some extracted cluster contained a pair $i, j$ with $d(i,j) > B$, then for that cluster $c$ we would have $x_(i,c) = x_(j,c) = 1$, contradicting the conflict inequality. Hence every extracted cluster has diameter at most $B$, so the extracted assignment is a feasible clustering using at most $K$ clusters. + + _Variable mapping._ The implementation stores variable $x_(i,c)$ at index $i dot K + c$, i.e.\ consecutive blocks of $K$ variables per element. + + _Solution extraction._ For each element $i$, scan the block $(x_(i,0), dots, x_(i,K-1))$ and return the unique cluster $c$ with value 1. +] + +// 5. 
PartitionIntoCliques → MinimumCoveringByCliques (#889) +#let pic_mcbc = load-example("PartitionIntoCliques", "MinimumCoveringByCliques") +#let pic_mcbc_sol = pic_mcbc.solutions.at(0) +#reduction-rule("PartitionIntoCliques", "MinimumCoveringByCliques", + example: true, + example-caption: [$n = #graph-num-vertices(pic_mcbc.source.instance)$ vertices, $m = #graph-num-edges(pic_mcbc.source.instance)$ edges, $K = #pic_mcbc.source.instance.num_cliques$], + extra: [ + #pred-commands( + "pred create --example " + problem-spec(pic_mcbc.source) + " -o partition-into-cliques.json", + "pred reduce partition-into-cliques.json --to " + target-spec(pic_mcbc) + " -o bundle.json", + "pred solve bundle.json", + "pred evaluate partition-into-cliques.json --config " + pic_mcbc_sol.source_config.map(str).join(","), + ) + + *Step 1 -- Source instance.* Graph $G$ with $n = #graph-num-vertices(pic_mcbc.source.instance)$ vertices, $m = #graph-num-edges(pic_mcbc.source.instance)$ edge, and clique bound $K = #pic_mcbc.source.instance.num_cliques$. The stored partition witness is $(#pic_mcbc_sol.source_config.map(str).join(", "))$, namely the cliques ${0,1}$ and ${2}$. + + *Step 2 -- Orlin construction.* The target graph has $#graph-num-vertices(pic_mcbc.target.instance)$ vertices and $#graph-num-edges(pic_mcbc.target.instance)$ edges. Because the source has two directed edge copies, the construction adds the gadgets $Q_(0,1)$ and $Q_(1,0)$, plus the side cliques $L^*$ and $R^*$. The threshold is $K' = K + 2m + 2 = #(pic_mcbc.source.instance.num_cliques + 2 * graph-num-edges(pic_mcbc.source.instance) + 2)$. + + *Step 3 -- Verify the witness.* The target witness labels $#pic_mcbc_sol.target_config.len()$ target edges with 6 clique IDs, corresponding to $D_1 = {x_0, x_1, y_0, y_1}$, $D_2 = {x_2, y_2}$, $Q_(0,1)$, $Q_(1,0)$, $L^*$, and $R^*$. Reading only the labels on the matching edges $x_i y_i$ recovers the source partition $(#pic_mcbc_sol.source_config.map(str).join(", "))$ #sym.checkmark. 
+ + *Multiplicity:* The fixture stores one canonical witness. Any permutation of the six target clique labels is equivalent. + ], +)[ + This $O((n + 2m)^2)$ reduction @garey1979[GT17] @orlin1977 @kouStockmeyerWong1978 inlines Orlin's vertex-clique-cover to edge-clique-cover construction. Each source vertex $v_i$ becomes left/right copies $x_i$ and $y_i$, each directed source edge contributes a 4-vertex gadget, and two side cliques account for the additive $2m + 2$ slack. The target graph has an edge-clique cover of size at most $K + 2m + 2$ if and only if the source graph admits a partition into at most $K$ cliques. +][ + _Construction._ Let $(G = (V, E), K)$ be a Partition Into Cliques instance with $V = {v_1, dots, v_n}$ and $m = |E|$. Define the directed-edge index set $A = {(i,j) : i != j and {v_i,v_j} in E}$, so $|A| = 2m$. Create vertices $x_i, y_i$ for each source vertex $v_i$, gadget vertices $a_(i,j), b_(i,j)$ for each $(i,j) in A$, and two special vertices $z_L, z_R$. Let $L = {x_i : 1 <= i <= n} union {a_(i,j) : (i,j) in A}$ and $R = {y_i : 1 <= i <= n} union {b_(i,j) : (i,j) in A}$. Make $L$ a clique and $R$ a clique, join $z_L$ to every vertex of $L$ and $z_R$ to every vertex of $R$, add each matching edge $x_i y_i$, and for every $(i,j) in A$ add the four cross edges $x_i y_j$, $x_i b_(i,j)$, $a_(i,j) y_j$, and $a_(i,j) b_(i,j)$. Output the resulting Minimum Covering by Cliques instance. It has + $ + |V(H)| = 2n + 4m + 2, quad |E(H)| = (n + 2m)^2 + 2n + 10m, + $ + and threshold $K' = K + 2m + 2$. + + _Correctness._ ($arrow.r.double$) Suppose $G$ is partitioned into cliques $C_1, dots, C_t$ with $t <= K$. For each source clique $C_r$, define $D_r = {x_i, y_i : v_i in C_r}$. For each $(i,j) in A$, define $Q_(i,j) = {x_i, a_(i,j), b_(i,j), y_j}$. Also define $L^* = L union {z_L}$ and $R^* = R union {z_R}$. 
Every $D_r$ is a clique: if $v_i, v_j in C_r$ with $i != j$, then ${v_i,v_j} in E$, so the construction includes both cross edges $x_i y_j$ and $x_j y_i$. The family ${D_1, dots, D_t} union {Q_(i,j) : (i,j) in A} union {L^*, R^*}$ therefore covers every target edge, using at most $K + 2m + 2$ cliques. + + ($arrow.l.double$) Conversely, suppose $H$ has an edge-clique cover with at most $K + 2m + 2$ cliques. Each gadget edge $a_(i,j) b_(i,j)$ belongs to the unique maximal clique $Q_(i,j)$, so covering all $2m$ such edges requires at least $2m$ distinct cliques. Likewise, some clique must contain $z_L$ and some clique must contain $z_R$, and neither of those cliques can contain a matching edge $x_i y_i$. Hence at most $K$ cliques remain available for the matching edges. If two matching edges $x_i y_i$ and $x_j y_j$ lie in the same target clique, that clique contains the four vertices $x_i, y_i, x_j, y_j$, so in particular $x_i y_j$ is an edge of $H$; by construction this implies ${v_i,v_j} in E$. Therefore the source vertices whose matching edges share one target-clique label form a clique of $G$. Grouping each $v_i$ by the label used on $x_i y_i$ yields a partition of $V$ into at most $K$ cliques. + + _Variable mapping._ The source witness labels source vertices by clique. The target witness labels target edges by the covering clique that contains them. + + _Solution extraction._ Inspect the label assigned to each matching edge $x_i y_i$. Compress the distinct matching-edge labels to $0, dots, k-1$ and assign source vertex $v_i$ to the compressed label of its matching edge. The previous paragraph proves that these label classes are source cliques, and the forced gadget/side cliques guarantee $k <= K$ whenever the target cover has size at most $K + 2m + 2$. +] + +// 6. 
KSatisfiability → Kernel (#882) #let ksat_ker = load-example("KSatisfiability", "Kernel") #let ksat_ker_sol = ksat_ker.solutions.at(0) #reduction-rule("KSatisfiability", "Kernel", @@ -14902,7 +15419,7 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ Read only the positive literal vertices: $alpha(x_i) = 1$ iff the even-indexed vertex for $x_i$ is in the kernel. Any selected clause vertices are ignored during extraction. ] -// 5. HamiltonianPath → DegreeConstrainedSpanningTree (#911) +// 7. HamiltonianPath → DegreeConstrainedSpanningTree (#911) #let hp_dcst = load-example("HamiltonianPath", "DegreeConstrainedSpanningTree") #let hp_dcst_sol = hp_dcst.solutions.at(0) #reduction-rule("HamiltonianPath", "DegreeConstrainedSpanningTree", @@ -14934,7 +15451,7 @@ The following table shows concrete variable overhead for example instances, take _Solution extraction._ Collect the selected edges, find an endpoint (degree 1 vertex), walk the path to produce the vertex permutation. ] -// 6. NAESatisfiability → SetSplitting (#382) +// 8. NAESatisfiability → SetSplitting (#382) #let nae_ss = load-example("NAESatisfiability", "SetSplitting") #let nae_ss_sol = nae_ss.solutions.at(0) #reduction-rule("NAESatisfiability", "SetSplitting", diff --git a/docs/paper/references.bib b/docs/paper/references.bib index 87380e54..fc9cd0a8 100644 --- a/docs/paper/references.bib +++ b/docs/paper/references.bib @@ -164,6 +164,17 @@ @article{brucker1977 doi = {10.1016/S0167-5060(08)70743-X} } +@incollection{brucker1978clustering, + author = {Peter Brucker}, + title = {On the Complexity of Clustering Problems}, + booktitle = {Optimization and Operations Research}, + series = {Lecture Notes in Economics and Mathematical Systems}, + volume = {157}, + pages = {45--54}, + publisher = {Springer}, + year = {1978} +} + @article{lawler1977, author = {Eugene L. 
Lawler}, title = {A pseudopolynomial algorithm for sequencing jobs to minimize total tardiness}, @@ -242,6 +253,28 @@ @book{garey1979 year = {1979} } +@article{orlin1977, + author = {James B. Orlin}, + title = {Contentment in Graph Theory: Covering Graphs with Cliques}, + journal = {Indagationes Mathematicae (Proceedings)}, + volume = {80}, + number = {5}, + pages = {406--424}, + year = {1977}, + doi = {10.1016/1385-7258(77)90055-5} +} + +@article{kouStockmeyerWong1978, + author = {Lawrence T. Kou and Larry J. Stockmeyer and C. K. Wong}, + title = {Covering Edges by Cliques with Regard to Keyword Conflicts and Intersection Graphs}, + journal = {Communications of the ACM}, + volume = {21}, + number = {2}, + pages = {135--139}, + year = {1978}, + doi = {10.1145/359340.359346} +} + @article{galilMegiddo1977, author = {Zvi Galil and Nimrod Megiddo}, title = {Cyclic Ordering is NP-Complete}, diff --git a/problemreductions-cli/tests/cli_tests.rs b/problemreductions-cli/tests/cli_tests.rs index 9c0eccae..809670b6 100644 --- a/problemreductions-cli/tests/cli_tests.rs +++ b/problemreductions-cli/tests/cli_tests.rs @@ -7642,7 +7642,15 @@ fn test_show_ksat_works() { fn test_path_all_max_paths_truncates() { // With --max-paths 3, should limit to 3 paths and indicate truncation let output = pred() - .args(["path", "MIS", "QUBO", "--all", "--max-paths", "3", "--json"]) + .args([ + "path", + "KSat", + "QUBO", + "--all", + "--max-paths", + "3", + "--json", + ]) .output() .unwrap(); assert!( @@ -7661,17 +7669,17 @@ fn test_path_all_max_paths_truncates() { paths.len() ); assert_eq!(envelope["max_paths"], 3); - // MIS -> QUBO has many paths, so truncation is expected + // KSat -> QUBO has many paths, so truncation is expected assert_eq!( envelope["truncated"], true, - "should be truncated since MIS->QUBO has many paths" + "should be truncated since KSat->QUBO has many paths" ); } #[test] fn test_path_all_max_paths_text_truncation_note() { let output = pred() - .args(["path", "MIS", 
"QUBO", "--all", "--max-paths", "2"]) + .args(["path", "KSat", "QUBO", "--all", "--max-paths", "2"]) .output() .unwrap(); assert!(output.status.success()); diff --git a/src/models/algebraic/quadratic_diophantine_equations.rs b/src/models/algebraic/quadratic_diophantine_equations.rs index 039ac21a..7fdc2984 100644 --- a/src/models/algebraic/quadratic_diophantine_equations.rs +++ b/src/models/algebraic/quadratic_diophantine_equations.rs @@ -1,11 +1,16 @@ //! Quadratic Diophantine Equations problem implementation. //! -//! Given positive integers a, b, c, determine whether there exist -//! positive integers x, y such that ax² + by = c. +//! Given positive integers `a`, `b`, and `c`, determine whether there exist +//! positive integers `x`, `y` such that `a x^2 + b y = c`. +//! +//! The witness integer `x` is encoded as a little-endian binary vector so the +//! model can represent large reductions without fixed-width overflow. use crate::registry::{FieldInfo, ProblemSchemaEntry, ProblemSizeFieldEntry}; use crate::traits::Problem; use crate::types::Or; +use num_bigint::{BigUint, ToBigUint}; +use num_traits::{One, Zero}; use serde::de::Error as _; use serde::{Deserialize, Deserializer, Serialize}; @@ -16,11 +21,11 @@ inventory::submit! 
{ aliases: &["QDE"], dimensions: &[], module_path: module_path!(), - description: "Decide whether ax² + by = c has a solution in positive integers x, y", + description: "Decide whether ax^2 + by = c has a solution in positive integers x, y", fields: &[ - FieldInfo { name: "a", type_name: "u64", description: "Coefficient of x²" }, - FieldInfo { name: "b", type_name: "u64", description: "Coefficient of y" }, - FieldInfo { name: "c", type_name: "u64", description: "Right-hand side constant" }, + FieldInfo { name: "a", type_name: "BigUint", description: "Coefficient of x^2" }, + FieldInfo { name: "b", type_name: "BigUint", description: "Coefficient of y" }, + FieldInfo { name: "c", type_name: "BigUint", description: "Right-hand side constant" }, ], } } @@ -28,55 +33,92 @@ inventory::submit! { inventory::submit! { ProblemSizeFieldEntry { name: "QuadraticDiophantineEquations", - fields: &["c"], + fields: &["bit_length_a", "bit_length_b", "bit_length_c"], } } /// Quadratic Diophantine Equations problem. /// -/// Given positive integers a, b, c, determine whether there exist -/// positive integers x, y such that ax² + by = c. -/// -/// # Example +/// Given positive integers `a`, `b`, and `c`, determine whether there exist +/// positive integers `x`, `y` such that `a x^2 + b y = c`. /// -/// ``` -/// use problemreductions::models::algebraic::QuadraticDiophantineEquations; -/// use problemreductions::{Problem, Solver, BruteForce}; -/// -/// // a=3, b=5, c=53: x=1 gives y=10, x=4 gives y=1 -/// let problem = QuadraticDiophantineEquations::new(3, 5, 53); -/// let solver = BruteForce::new(); -/// let witness = solver.find_witness(&problem); -/// assert!(witness.is_some()); -/// ``` +/// The configuration encodes `x` in little-endian binary: +/// `config[i] in {0,1}` is the coefficient of `2^i`. #[derive(Debug, Clone, Serialize)] pub struct QuadraticDiophantineEquations { - /// Coefficient of x². - a: u64, + /// Coefficient of x^2. 
+ #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + a: BigUint, /// Coefficient of y. - b: u64, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + b: BigUint, /// Right-hand side constant. - c: u64, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + c: BigUint, +} + +fn bit_length(value: &BigUint) -> usize { + if value.is_zero() { + 0 + } else { + let bytes = value.to_bytes_be(); + let msb = *bytes.first().expect("nonzero BigUint has bytes"); + 8 * (bytes.len() - 1) + (8 - msb.leading_zeros() as usize) + } } impl QuadraticDiophantineEquations { - fn validate_inputs(a: u64, b: u64, c: u64) -> Result<(), String> { - if a == 0 { + fn validate_inputs(a: &BigUint, b: &BigUint, c: &BigUint) -> Result<(), String> { + if a.is_zero() { return Err("Coefficient a must be positive".to_string()); } - if b == 0 { + if b.is_zero() { return Err("Coefficient b must be positive".to_string()); } - if c == 0 { + if c.is_zero() { return Err("Right-hand side c must be positive".to_string()); } Ok(()) } + fn isqrt(n: &BigUint) -> BigUint { + if n.is_zero() { + return BigUint::zero(); + } + + let mut low = BigUint::zero(); + let mut high = BigUint::one() << bit_length(n).div_ceil(2); + + while low < high { + let mid = (&low + &high + BigUint::one()) >> 1usize; + if &mid * &mid <= *n { + low = mid; + } else { + high = mid - BigUint::one(); + } + } + + low + } + /// Create a new QuadraticDiophantineEquations instance, returning an error /// instead of panicking when inputs are invalid. 
- pub fn try_new(a: u64, b: u64, c: u64) -> Result { - Self::validate_inputs(a, b, c)?; + pub fn try_new(a: A, b: B, c: C) -> Result + where + A: ToBigUint, + B: ToBigUint, + C: ToBigUint, + { + let a = a + .to_biguint() + .ok_or_else(|| "Coefficient a must be nonnegative".to_string())?; + let b = b + .to_biguint() + .ok_or_else(|| "Coefficient b must be nonnegative".to_string())?; + let c = c + .to_biguint() + .ok_or_else(|| "Right-hand side c must be nonnegative".to_string())?; + Self::validate_inputs(&a, &b, &c)?; Ok(Self { a, b, c }) } @@ -84,79 +126,140 @@ impl QuadraticDiophantineEquations { /// /// # Panics /// - /// Panics if any of a, b, c is zero. - pub fn new(a: u64, b: u64, c: u64) -> Self { + /// Panics if any of `a`, `b`, `c` is zero. + pub fn new(a: A, b: B, c: C) -> Self + where + A: ToBigUint, + B: ToBigUint, + C: ToBigUint, + { Self::try_new(a, b, c).unwrap_or_else(|msg| panic!("{msg}")) } - /// Get the coefficient a (coefficient of x²). - pub fn a(&self) -> u64 { - self.a + /// Get the coefficient a (coefficient of x^2). + pub fn a(&self) -> &BigUint { + &self.a } /// Get the coefficient b (coefficient of y). - pub fn b(&self) -> u64 { - self.b + pub fn b(&self) -> &BigUint { + &self.b } /// Get the right-hand side constant c. - pub fn c(&self) -> u64 { - self.c + pub fn c(&self) -> &BigUint { + &self.c + } + + /// Number of bits needed to encode the coefficient a. + pub fn bit_length_a(&self) -> usize { + bit_length(&self.a) + } + + /// Number of bits needed to encode the coefficient b. + pub fn bit_length_b(&self) -> usize { + bit_length(&self.b) } - /// Compute the integer square root of n (floor(sqrt(n))). - fn isqrt(n: u64) -> u64 { - if n == 0 { - return 0; + /// Number of bits needed to encode the constant c. + pub fn bit_length_c(&self) -> usize { + bit_length(&self.c) + } + + fn max_x(&self) -> BigUint { + if self.c < self.a { + return BigUint::zero(); } - let mut x = (n as f64).sqrt() as u64; - // Correct for floating-point imprecision. 
- while x.saturating_mul(x) > n { - x -= 1; + Self::isqrt(&(&self.c / &self.a)) + } + + fn witness_bit_length(&self) -> usize { + let max_x = self.max_x(); + if max_x.is_zero() { + 0 + } else { + bit_length(&max_x) + } + } + + /// Encode a candidate witness integer `x` as a little-endian binary configuration. + pub fn encode_witness(&self, x: &BigUint) -> Option> { + if x.is_zero() || x > &self.max_x() { + return None; } - while (x + 1).saturating_mul(x + 1) <= n { - x += 1; + + let num_bits = self.witness_bit_length(); + let mut remaining = x.clone(); + let mut config = Vec::with_capacity(num_bits); + + for _ in 0..num_bits { + config.push(if (&remaining & BigUint::one()).is_zero() { + 0 + } else { + 1 + }); + remaining >>= 1usize; + } + + if remaining.is_zero() { + Some(config) + } else { + None } - x } - /// Compute the maximum value of x (floor(sqrt(c/a))). - /// Returns 0 if c < a (no positive x is possible since x >= 1 requires a*1 <= c). - fn max_x(&self) -> u64 { - if self.c < self.a { - return 0; + /// Decode a little-endian binary configuration into its candidate witness `x`. + pub fn decode_witness(&self, config: &[usize]) -> Option { + if config.len() != self.witness_bit_length() || config.iter().any(|&digit| digit > 1) { + return None; } - Self::isqrt(self.c / self.a) + + let mut value = BigUint::zero(); + let mut weight = BigUint::one(); + for &digit in config { + if digit == 1 { + value += &weight; + } + weight <<= 1usize; + } + Some(value) } /// Check whether a given x yields a valid positive integer y. /// - /// Returns Some(y) if y is a positive integer, None otherwise. - pub fn check_x(&self, x: u64) -> Option { - if x == 0 { + /// Returns `Some(y)` if `y` is a positive integer, `None` otherwise. 
+ pub fn check_x(&self, x: &BigUint) -> Option { + if x.is_zero() { return None; } - let ax2 = self.a.checked_mul(x)?.checked_mul(x)?; + + let ax2 = &self.a * x * x; if ax2 >= self.c { return None; } - let remainder = self.c - ax2; - if !remainder.is_multiple_of(self.b) { + + let remainder = &self.c - ax2; + if (&remainder % &self.b) != BigUint::zero() { return None; } - let y = remainder / self.b; - if y == 0 { + + let y = remainder / &self.b; + if y.is_zero() { return None; } + Some(y) } } #[derive(Deserialize)] struct QuadraticDiophantineEquationsData { - a: u64, - b: u64, - c: u64, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + a: BigUint, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + b: BigUint, + #[serde(with = "crate::models::misc::biguint_serde::decimal_biguint")] + c: BigUint, } impl<'de> Deserialize<'de> for QuadraticDiophantineEquations { @@ -178,38 +281,42 @@ impl Problem for QuadraticDiophantineEquations { } fn dims(&self) -> Vec { - let max_x = self.max_x() as usize; - if max_x == 0 { - // No valid x exists; return empty config space. - return vec![0]; + let num_bits = self.witness_bit_length(); + if num_bits == 0 { + Vec::new() + } else { + vec![2; num_bits] } - // One variable: x ranges over {1, ..., max_x}. - // config[0] in {0, ..., max_x - 1} maps to x = config[0] + 1. - vec![max_x] } fn evaluate(&self, config: &[usize]) -> Or { - Or({ - if config.len() != 1 { - return Or(false); - } - let x = (config[0] as u64) + 1; // 1-indexed - self.check_x(x).is_some() - }) + let Some(x) = self.decode_witness(config) else { + return Or(false); + }; + + if x.is_zero() || x > self.max_x() { + return Or(false); + } + + Or(self.check_x(&x).is_some()) } } crate::declare_variants! 
{ - default QuadraticDiophantineEquations => "sqrt(c)", + default QuadraticDiophantineEquations => "2^bit_length_c", } #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { + let instance = QuadraticDiophantineEquations::new(3u32, 5u32, 53u32); + let optimal_config = instance + .encode_witness(&BigUint::from(1u32)) + .expect("x=1 should be a valid canonical witness"); + vec![crate::example_db::specs::ModelExampleSpec { id: "quadratic_diophantine_equations", - instance: Box::new(QuadraticDiophantineEquations::new(3, 5, 53)), - // x=1 (config[0]=0) gives y=10: 3*1 + 5*10 = 53 - optimal_config: vec![0], + instance: Box::new(instance), + optimal_config, optimal_value: serde_json::json!(true), }] } diff --git a/src/models/decision.rs b/src/models/decision.rs index c7c0ac38..7ef3d129 100644 --- a/src/models/decision.rs +++ b/src/models/decision.rs @@ -1,6 +1,6 @@ //! Generic decision wrapper for optimization problems. -use crate::rules::{AggregateReductionResult, ReduceToAggregate}; +use crate::rules::{AggregateReductionResult, ReduceTo, ReduceToAggregate, ReductionResult}; use crate::traits::Problem; use crate::types::{OptimizationValue, Or}; use serde::de::DeserializeOwned; @@ -62,6 +62,7 @@ macro_rules! register_decision_variant { } } + // Decision
<P>
→ P: both witness (identity config) and aggregate (solve + compare) $crate::inventory::submit! { $crate::rules::ReductionEntry { source_name: $name, @@ -70,7 +71,14 @@ macro_rules! register_decision_variant { target_variant_fn: <$inner as $crate::traits::Problem>::variant, overhead_fn: || $crate::rules::ReductionOverhead::identity(&[$($sg_name),*]), module_path: module_path!(), - reduce_fn: None, + reduce_fn: Some(|any| { + let source = any + .downcast_ref::<$crate::models::decision::Decision<$inner>>() + .expect(concat!($name, " witness reduction source type mismatch")); + Box::new( + <$crate::models::decision::Decision<$inner> as $crate::rules::ReduceTo<$inner>>::reduce_to(source), + ) + }), reduce_aggregate_fn: Some(|any| { let source = any .downcast_ref::<$crate::models::decision::Decision<$inner>>() @@ -79,7 +87,7 @@ macro_rules! register_decision_variant { <$crate::models::decision::Decision<$inner> as $crate::rules::ReduceToAggregate<$inner>>::reduce_to_aggregate(source), ) }), - capabilities: $crate::rules::EdgeCapabilities::aggregate_only(), + capabilities: $crate::rules::EdgeCapabilities::both(), overhead_eval_fn: |any| { let source = any .downcast_ref::<$crate::models::decision::Decision<$inner>>() @@ -245,6 +253,51 @@ where } } +/// Witness reduction result for `Decision
<P> -> P`.
+///
+/// The configuration spaces are identical — a config that is optimal for
+/// `P` and meets the bound is a valid `Decision<P>` witness. The
+/// `extract_solution` is the identity function.
+#[derive(Debug, Clone)]
+pub struct DecisionToOptimizationWitnessResult<P>
+where + P: Problem, + P::Value: OptimizationValue, +{ + target: P, +} + +impl
<P> ReductionResult for DecisionToOptimizationWitnessResult<P>
+where
+    P: DecisionProblemMeta + 'static,
+    P::Value: OptimizationValue + Serialize + DeserializeOwned,
+{
+    type Source = Decision<P>
; + type Target = P; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +impl
<P> ReduceTo<P> for Decision<P>
+where
+    P: DecisionProblemMeta + Clone + 'static,
+    P::Value: OptimizationValue + Serialize + DeserializeOwned,
+{
+    type Result = DecisionToOptimizationWitnessResult<P>
; + + fn reduce_to(&self) -> Self::Result { + DecisionToOptimizationWitnessResult { + target: self.inner.clone(), + } + } +} + #[cfg(test)] #[path = "../unit_tests/models/decision.rs"] mod tests; diff --git a/src/models/formula/ksat.rs b/src/models/formula/ksat.rs index 9faae18d..7cf13427 100644 --- a/src/models/formula/ksat.rs +++ b/src/models/formula/ksat.rs @@ -183,6 +183,11 @@ impl KSatisfiability { self.clauses().iter().map(|c| c.len()).sum() } + /// Padding term used by Sethi's Register Sufficiency reduction. + pub fn register_sufficiency_padding(&self) -> usize { + (2 * self.num_vars).saturating_sub(self.num_clauses()) + } + pub fn simultaneous_incongruences_num_incongruences(&self) -> usize { first_n_odd_primes(self.num_vars) .into_iter() diff --git a/src/models/graph/min_max_multicenter.rs b/src/models/graph/min_max_multicenter.rs index bddae245..5bf60ef5 100644 --- a/src/models/graph/min_max_multicenter.rs +++ b/src/models/graph/min_max_multicenter.rs @@ -17,7 +17,7 @@ inventory::submit! { aliases: &["pCenter"], dimensions: &[ VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), - VariantDimension::new("weight", "i32", &["i32"]), + VariantDimension::new("weight", "i32", &["i32", "One"]), ], module_path: module_path!(), description: "Find K centers minimizing the maximum weighted distance from any vertex to its nearest center (vertex p-center)", @@ -273,6 +273,7 @@ where crate::declare_variants! 
{ default MinMaxMulticenter => "1.4969^num_vertices", + MinMaxMulticenter => "1.4969^num_vertices", } #[cfg(feature = "example-db")] diff --git a/src/models/graph/minimum_dominating_set.rs b/src/models/graph/minimum_dominating_set.rs index 683dc016..0e1d5223 100644 --- a/src/models/graph/minimum_dominating_set.rs +++ b/src/models/graph/minimum_dominating_set.rs @@ -7,7 +7,7 @@ use crate::models::decision::Decision; use crate::registry::{FieldInfo, ProblemSchemaEntry, VariantDimension}; use crate::topology::{Graph, SimpleGraph}; use crate::traits::Problem; -use crate::types::{Min, WeightElement}; +use crate::types::{Min, One, WeightElement}; use num_traits::Zero; use serde::{Deserialize, Serialize}; use std::collections::HashSet; @@ -19,7 +19,7 @@ inventory::submit! { aliases: &[], dimensions: &[ VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), - VariantDimension::new("weight", "i32", &["i32"]), + VariantDimension::new("weight", "i32", &["i32", "One"]), ], module_path: module_path!(), description: "Find minimum weight dominating set in a graph", @@ -166,6 +166,7 @@ where crate::declare_variants! { default MinimumDominatingSet => "1.4969^num_vertices", + MinimumDominatingSet => "1.4969^num_vertices", } impl crate::models::decision::DecisionProblemMeta for MinimumDominatingSet @@ -194,6 +195,23 @@ impl Decision> { } } +impl Decision> { + /// Number of vertices in the underlying graph. + pub fn num_vertices(&self) -> usize { + self.inner().num_vertices() + } + + /// Number of edges in the underlying graph. + pub fn num_edges(&self) -> usize { + self.inner().num_edges() + } + + /// Decision bound as a nonnegative integer. 
+ pub fn k(&self) -> usize { + (*self.bound()).try_into().unwrap_or(0) + } +} + crate::register_decision_variant!( MinimumDominatingSet, "DecisionMinimumDominatingSet", @@ -202,7 +220,7 @@ crate::register_decision_variant!( "Decision version: does a dominating set of cost <= bound exist?", dims: [ VariantDimension::new("graph", "SimpleGraph", &["SimpleGraph"]), - VariantDimension::new("weight", "i32", &["i32"]), + VariantDimension::new("weight", "i32", &["i32", "One"]), ], fields: [ FieldInfo { name: "graph", type_name: "G", description: "The underlying graph G=(V,E)" }, @@ -212,6 +230,133 @@ crate::register_decision_variant!( size_getters: [("num_vertices", num_vertices), ("num_edges", num_edges)] ); +impl crate::traits::DeclaredVariant for Decision> {} + +inventory::submit! { + crate::registry::VariantEntry { + name: > as Problem>::NAME, + variant_fn: > as Problem>::variant, + complexity: "1.4969^num_vertices", + complexity_eval_fn: |any| { + let problem = any + .downcast_ref::>>() + .expect("DecisionMinimumDominatingSet complexity source type mismatch"); + 1.4969_f64.powf(problem.num_vertices() as f64) + }, + is_default: false, + factory: |data| { + serde_json::from_value::>>(data) + .map(|problem| Box::new(problem) as Box) + }, + serialize_fn: |any| { + any.downcast_ref::>>() + .and_then(|problem| serde_json::to_value(problem).ok()) + }, + solve_value_fn: |any| { + let problem = any + .downcast_ref::>>() + .expect("DecisionMinimumDominatingSet value solve source type mismatch"); + let (value, _) = crate::solvers::BruteForce::new().solve_with_witnesses(problem); + crate::registry::format_metric(&value) + }, + solve_witness_fn: |any| { + let problem = any + .downcast_ref::>>() + .expect("DecisionMinimumDominatingSet witness solve source type mismatch"); + let (value, witnesses) = crate::solvers::BruteForce::new().solve_with_witnesses(problem); + witnesses + .into_iter() + .next() + .map(|config| (config, crate::registry::format_metric(&value))) + }, + } +} + +// 
Decision> → MDS: both witness (identity config) and aggregate (solve + compare) +inventory::submit! { + crate::rules::ReductionEntry { + source_name: "DecisionMinimumDominatingSet", + target_name: "MinimumDominatingSet", + source_variant_fn: > as Problem>::variant, + target_variant_fn: as Problem>::variant, + overhead_fn: || crate::rules::ReductionOverhead::identity(&["num_vertices", "num_edges"]), + module_path: module_path!(), + reduce_fn: Some(|any| { + let source = any + .downcast_ref::>>() + .expect("DecisionMinimumDominatingSet witness reduction source type mismatch"); + Box::new( + > as crate::rules::ReduceTo< + MinimumDominatingSet, + >>::reduce_to(source), + ) + }), + reduce_aggregate_fn: Some(|any| { + let source = any + .downcast_ref::>>() + .expect("DecisionMinimumDominatingSet aggregate reduction source type mismatch"); + Box::new( + > as crate::rules::ReduceToAggregate< + MinimumDominatingSet, + >>::reduce_to_aggregate(source), + ) + }), + capabilities: crate::rules::EdgeCapabilities::both(), + overhead_eval_fn: |any| { + let source = any + .downcast_ref::>>() + .expect("DecisionMinimumDominatingSet overhead source type mismatch"); + crate::types::ProblemSize::new(vec![ + ("num_vertices", source.num_vertices()), + ("num_edges", source.num_edges()), + ]) + }, + source_size_fn: |any| { + let source = any + .downcast_ref::>>() + .expect("DecisionMinimumDominatingSet size source type mismatch"); + crate::types::ProblemSize::new(vec![ + ("num_vertices", source.num_vertices()), + ("num_edges", source.num_edges()), + ("k", source.k()), + ]) + }, + } +} + +// Reverse edge: MDS → Decision> (Turing) +inventory::submit! 
{ + crate::rules::ReductionEntry { + source_name: "MinimumDominatingSet", + target_name: "DecisionMinimumDominatingSet", + source_variant_fn: as Problem>::variant, + target_variant_fn: > as Problem>::variant, + overhead_fn: || crate::rules::ReductionOverhead::identity(&["num_vertices", "num_edges"]), + module_path: module_path!(), + reduce_fn: None, + reduce_aggregate_fn: None, + capabilities: crate::rules::EdgeCapabilities::turing(), + overhead_eval_fn: |any| { + let source = any + .downcast_ref::>() + .expect("DecisionMinimumDominatingSet turing overhead source type mismatch"); + crate::types::ProblemSize::new(vec![ + ("num_vertices", source.num_vertices()), + ("num_edges", source.num_edges()), + ]) + }, + source_size_fn: |any| { + let source = any + .downcast_ref::>() + .expect("DecisionMinimumDominatingSet turing size source type mismatch"); + crate::types::ProblemSize::new(vec![ + ("num_vertices", source.num_vertices()), + ("num_edges", source.num_edges()), + ]) + }, + } +} + #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { vec![crate::example_db::specs::ModelExampleSpec { @@ -228,50 +373,97 @@ pub(crate) fn canonical_model_example_specs() -> Vec Vec { - vec![crate::example_db::specs::ModelExampleSpec { - id: "decision_minimum_dominating_set_simplegraph_i32", - instance: Box::new(Decision::new( - MinimumDominatingSet::new( - SimpleGraph::new(5, vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]), - vec![1i32; 5], - ), - 2, - )), - optimal_config: vec![0, 0, 1, 1, 0], - optimal_value: serde_json::json!(true), - }] -} - -#[cfg(feature = "example-db")] -pub(crate) fn decision_canonical_rule_example_specs( -) -> Vec { - vec![crate::example_db::specs::RuleExampleSpec { - id: "decision_minimum_dominating_set_to_minimum_dominating_set", - build: || { - use crate::example_db::specs::assemble_rule_example; - use crate::export::SolutionPair; - use crate::rules::{AggregateReductionResult, ReduceToAggregate}; - - let source = 
crate::models::decision::Decision::new( + vec![ + crate::example_db::specs::ModelExampleSpec { + id: "decision_minimum_dominating_set_simplegraph_i32", + instance: Box::new(Decision::new( MinimumDominatingSet::new( SimpleGraph::new(5, vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]), vec![1i32; 5], ), 2, - ); - let result = source.reduce_to_aggregate(); - let target = result.target_problem(); - let config = vec![0, 0, 1, 1, 0]; - assemble_rule_example( - &source, - target, - vec![SolutionPair { - source_config: config.clone(), - target_config: config, - }], - ) + )), + optimal_config: vec![0, 0, 1, 1, 0], + optimal_value: serde_json::json!(true), }, - }] + crate::example_db::specs::ModelExampleSpec { + id: "decision_minimum_dominating_set_simplegraph_one", + instance: Box::new(Decision::new( + MinimumDominatingSet::new( + SimpleGraph::new( + 6, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (3, 5), (4, 5)], + ), + vec![One; 6], + ), + 2, + )), + optimal_config: vec![1, 0, 0, 1, 0, 0], + optimal_value: serde_json::json!(true), + }, + ] +} + +#[cfg(feature = "example-db")] +pub(crate) fn decision_canonical_rule_example_specs( +) -> Vec { + vec![ + crate::example_db::specs::RuleExampleSpec { + id: "decision_minimum_dominating_set_to_minimum_dominating_set", + build: || { + use crate::example_db::specs::assemble_rule_example; + use crate::export::SolutionPair; + use crate::rules::{AggregateReductionResult, ReduceToAggregate}; + + let source = crate::models::decision::Decision::new( + MinimumDominatingSet::new( + SimpleGraph::new(5, vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]), + vec![1i32; 5], + ), + 2, + ); + let result = source.reduce_to_aggregate(); + let target = result.target_problem(); + let config = vec![0, 0, 1, 1, 0]; + assemble_rule_example( + &source, + target, + vec![SolutionPair { + source_config: config.clone(), + target_config: config, + }], + ) + }, + }, + // One-weight variant: Decision> → MDS (aggregate) + 
crate::example_db::specs::RuleExampleSpec { + id: "decision_minimum_dominating_set_one_to_minimum_dominating_set_one", + build: || { + use crate::example_db::specs::assemble_rule_example; + use crate::export::SolutionPair; + use crate::rules::{AggregateReductionResult, ReduceToAggregate}; + + let source = crate::models::decision::Decision::new( + MinimumDominatingSet::new( + SimpleGraph::new(5, vec![(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]), + vec![One; 5], + ), + 2, + ); + let result = source.reduce_to_aggregate(); + let target = result.target_problem(); + let config = vec![0, 0, 1, 1, 0]; + assemble_rule_example( + &source, + target, + vec![SolutionPair { + source_config: config.clone(), + target_config: config, + }], + ) + }, + }, + ] } /// Check if a set of vertices is a dominating set. diff --git a/src/models/misc/maximum_likelihood_ranking.rs b/src/models/misc/maximum_likelihood_ranking.rs index a49aee98..89d178d4 100644 --- a/src/models/misc/maximum_likelihood_ranking.rs +++ b/src/models/misc/maximum_likelihood_ranking.rs @@ -1,8 +1,10 @@ //! Maximum Likelihood Ranking problem implementation. //! -//! Given an n x n comparison matrix A where a_ij + a_ji = c and a_ii = 0, -//! find a permutation pi minimizing the total disagreement cost: -//! sum over all position pairs (i > j) of a_{pi(i), pi(j)}. +//! Given an n x n antisymmetric comparison matrix A where a_ij + a_ji = c +//! (constant) for every pair and a_ii = 0, find a permutation pi minimizing +//! the total disagreement cost: sum over all position pairs (i > j) of +//! a_{pi(i), pi(j)}. Entries may be negative (e.g. c = 0 gives a +//! skew-symmetric matrix). use crate::registry::{FieldInfo, ProblemSchemaEntry}; use crate::traits::Problem; @@ -18,16 +20,17 @@ inventory::submit! 
{ module_path: module_path!(), description: "Find a ranking minimizing total pairwise disagreement cost", fields: &[ - FieldInfo { name: "matrix", type_name: "Vec>", description: "Comparison matrix A (a_ij + a_ji = c, a_ii = 0)" }, + FieldInfo { name: "matrix", type_name: "Vec>", description: "Antisymmetric comparison matrix A (a_ij + a_ji = c, a_ii = 0)" }, ], } } /// The Maximum Likelihood Ranking problem. /// -/// Given an n x n comparison matrix A where a_ij + a_ji = c (constant) -/// and a_ii = 0, find a permutation pi that minimizes the total -/// disagreement cost: sum_{i > j} a_{pi(i), pi(j)}. +/// Given an n x n antisymmetric comparison matrix A where a_ij + a_ji = c +/// (constant) for every pair and a_ii = 0, find a permutation pi that +/// minimizes the total disagreement cost: sum_{i > j} a_{pi(i), pi(j)}. +/// Entries may be negative (e.g. c = 0 gives a skew-symmetric matrix). /// /// Each item is assigned a rank position (0-indexed). The configuration /// maps item -> rank: `config[item] = rank`. The permutation pi maps @@ -59,7 +62,9 @@ impl MaximumLikelihoodRanking { /// Create a new MaximumLikelihoodRanking instance. /// /// # Panics - /// Panics if the matrix is not square, or if any diagonal element is nonzero. + /// Panics if the matrix is not square, if any diagonal element is nonzero, + /// or if the pairwise sums `a_ij + a_ji` are not the same constant for + /// all `i != j`. 
pub fn new(matrix: Vec>) -> Self { let n = matrix.len(); for (i, row) in matrix.iter().enumerate() { @@ -75,6 +80,22 @@ impl MaximumLikelihoodRanking { row[i] ); } + + let mut comparison_count = None; + for (i, row) in matrix.iter().enumerate() { + for (j, &entry) in row.iter().enumerate().skip(i + 1) { + let pair_sum = entry + matrix[j][i]; + match comparison_count { + None => comparison_count = Some(pair_sum), + Some(expected) => assert_eq!( + pair_sum, + expected, + "all off-diagonal pairs must have the same comparison count: matrix[{i}][{j}] + matrix[{j}][{i}] = {pair_sum}, expected {expected}" + ), + } + } + } + Self { matrix } } @@ -87,6 +108,15 @@ impl MaximumLikelihoodRanking { pub fn num_items(&self) -> usize { self.matrix.len() } + + /// Returns the constant pairwise comparison count `c`. + pub fn comparison_count(&self) -> i32 { + if self.matrix.len() < 2 { + 0 + } else { + self.matrix[0][1] + self.matrix[1][0] + } + } } impl Problem for MaximumLikelihoodRanking { diff --git a/src/models/misc/minimum_fault_detection_test_set.rs b/src/models/misc/minimum_fault_detection_test_set.rs index cc62b223..43efeae6 100644 --- a/src/models/misc/minimum_fault_detection_test_set.rs +++ b/src/models/misc/minimum_fault_detection_test_set.rs @@ -1,7 +1,8 @@ //! Minimum Fault Detection Test Set problem implementation. //! //! Given a directed acyclic graph with designated input and output vertices, -//! find the minimum set of input-output pairs whose coverage sets cover all vertices. +//! find the minimum set of input-output pairs whose coverage sets cover all +//! internal vertices. use crate::registry::{FieldInfo, ProblemSchemaEntry}; use crate::traits::Problem; @@ -16,7 +17,7 @@ inventory::submit! 
{ aliases: &[], dimensions: &[], module_path: module_path!(), - description: "Find minimum set of input-output paths covering all DAG vertices", + description: "Find minimum set of input-output paths covering all internal DAG vertices", fields: &[ FieldInfo { name: "num_vertices", type_name: "usize", description: "Number of vertices in the DAG" }, FieldInfo { name: "arcs", type_name: "Vec<(usize, usize)>", description: "Directed arcs (u, v)" }, @@ -31,10 +32,11 @@ inventory::submit! { /// Given a directed acyclic graph G = (V, A) with designated input vertices /// I ⊆ V and output vertices O ⊆ V, find the minimum number of input-output /// pairs (i, o) ∈ I × O such that the union of their coverage sets covers -/// all vertices V. +/// every internal vertex V \ (I ∪ O). /// /// For a pair (i, o), the coverage set is the set of vertices reachable from i -/// that can also reach o (i.e., vertices on some i-to-o path). +/// that can also reach o (i.e., vertices on some i-to-o path). Inputs and +/// outputs themselves are not required to be covered. /// /// The configuration space is binary over all |I| × |O| pairs. /// @@ -286,18 +288,33 @@ impl Problem for MinimumFaultDetectionTestSet { return Min(None); } - // Collect union of coverage sets for selected pairs + let mut boundary = vec![false; self.num_vertices]; + for &input in &self.inputs { + boundary[input] = true; + } + for &output in &self.outputs { + boundary[output] = true; + } + let required_internal_vertices = + boundary.iter().filter(|&&is_boundary| !is_boundary).count(); + + // Collect union of internal vertices covered by the selected pairs. 
let mut covered: HashSet = HashSet::new(); let mut count = 0usize; for (idx, &sel) in config.iter().enumerate() { if sel == 1 { count += 1; - covered.extend(&self.coverage[idx]); + covered.extend( + self.coverage[idx] + .iter() + .copied() + .filter(|&vertex| !boundary[vertex]), + ); } } - // Check all vertices are covered - if covered.len() == self.num_vertices { + // Check all internal vertices are covered. + if covered.len() == required_internal_vertices { Min(Some(count)) } else { Min(None) @@ -312,9 +329,10 @@ crate::declare_variants! { #[cfg(feature = "example-db")] pub(crate) fn canonical_model_example_specs() -> Vec { // 7 vertices, inputs={0,1}, outputs={5,6} + // Internal vertices are {2,3,4} // Arcs: (0,2),(0,3),(1,3),(1,4),(2,5),(3,5),(3,6),(4,6) // Pairs: (0,5)->{0,2,3,5}, (0,6)->{0,3,6}, (1,5)->{1,3,5}, (1,6)->{1,3,4,6} - // Config [1,0,0,1]: select pairs (0,5) and (1,6) -> covers all 7 -> Min(2) + // Config [1,0,0,1]: select pairs (0,5) and (1,6) -> covers all internal vertices -> Min(2) vec![crate::example_db::specs::ModelExampleSpec { id: "minimum_fault_detection_test_set", instance: Box::new(MinimumFaultDetectionTestSet::new( diff --git a/src/models/misc/mod.rs b/src/models/misc/mod.rs index ca2c7c7a..ee7e2e65 100644 --- a/src/models/misc/mod.rs +++ b/src/models/misc/mod.rs @@ -30,7 +30,7 @@ //! - [`MinimumCodeGenerationParallelAssignments`]: Minimize backward dependencies when ordering parallel assignments //! - [`MinimumCodeGenerationUnlimitedRegisters`]: Minimize instruction count for an unlimited-register machine with 2-address instructions //! - [`MinimumExternalMacroDataCompression`]: Minimize compression cost using external dictionary -//! - [`MinimumFaultDetectionTestSet`]: Find minimum set of input-output paths covering all DAG vertices +//! - [`MinimumFaultDetectionTestSet`]: Find minimum set of input-output paths covering all internal DAG vertices //! 
- [`MinimumInternalMacroDataCompression`]: Minimize self-referencing compression cost //! - [`MinimumRegisterSufficiencyForLoops`]: Minimize registers for loop variable allocation (circular arc coloring) //! - [`MinimumWeightAndOrGraph`]: Find minimum-weight solution subgraph in a DAG with AND/OR gates diff --git a/src/models/misc/register_sufficiency.rs b/src/models/misc/register_sufficiency.rs index 21f9d775..3530cdde 100644 --- a/src/models/misc/register_sufficiency.rs +++ b/src/models/misc/register_sufficiency.rs @@ -99,6 +99,15 @@ impl RegisterSufficiency { self.arcs.len() } + /// Count vertices with no dependents. + pub fn num_sinks(&self) -> usize { + let mut has_dependent = vec![false; self.num_vertices]; + for &(_, dependency) in &self.arcs { + has_dependent[dependency] = true; + } + has_dependent.into_iter().filter(|&flag| !flag).count() + } + /// Get the register bound K. pub fn bound(&self) -> usize { self.bound @@ -191,6 +200,144 @@ impl RegisterSufficiency { Some(max_registers) } + + /// Exact branch-and-bound solver: finds a topological ordering using at + /// most `self.bound` registers, or returns `None` if no such ordering + /// exists. Uses heuristic candidate ordering (prefer vertices that free + /// the most registers) so that YES instances typically resolve on the + /// first greedy path without backtracking. For NO instances the full + /// search tree must be explored, so prefer the ILP solver path for + /// infeasibility proofs. + /// + /// NOTE: a greedy topological sort is *not* exact — it can miss valid + /// orderings. This method is exact because it backtracks when the + /// greedy choice fails. Do not replace it with a pure greedy solver. 
+ pub fn solve_exact(&self) -> Option> { + let n = self.num_vertices; + if n == 0 { + return Some(vec![]); + } + + let mut dependents: Vec> = vec![vec![]; n]; + let mut dependencies: Vec> = vec![vec![]; n]; + let mut in_degree = vec![0u32; n]; + for &(v, u) in &self.arcs { + in_degree[v] += 1; + dependents[u].push(v); + dependencies[v].push(u); + } + + let mut state = BnBState { + n, + bound: self.bound, + config: vec![0usize; n], + live: vec![false; n], + live_count: 0, + remaining_in_degree: in_degree.clone(), + remaining_deps: dependents.iter().map(|d| d.len()).collect(), + ready: (0..n).filter(|&v| in_degree[v] == 0).collect(), + dependents, + dependencies, + }; + state.ready.sort_unstable(); + + if state.backtrack(0) { + Some(state.config) + } else { + None + } + } +} + +struct BnBState { + n: usize, + bound: usize, + config: Vec, + live: Vec, + live_count: usize, + remaining_in_degree: Vec, + remaining_deps: Vec, + ready: Vec, + dependents: Vec>, + dependencies: Vec>, +} + +impl BnBState { + fn backtrack(&mut self, step: usize) -> bool { + if step == self.n { + return true; + } + + // Heuristic: prefer vertices that free the most registers. 
+ let mut candidates = self.ready.clone(); + candidates.sort_by_key(|&v| { + let frees = self.dependencies[v] + .iter() + .filter(|&&dep| self.remaining_deps[dep] == 1 && self.live[dep]) + .count(); + std::cmp::Reverse(frees) + }); + + for &vertex in &candidates { + self.config[vertex] = step; + + let was_live = self.live[vertex]; + if !was_live { + self.live[vertex] = true; + self.live_count += 1; + } + + let mut freed = Vec::new(); + for &dep in &self.dependencies[vertex] { + self.remaining_deps[dep] -= 1; + if self.remaining_deps[dep] == 0 && self.live[dep] { + self.live[dep] = false; + self.live_count -= 1; + freed.push(dep); + } + } + + if self.live_count <= self.bound { + self.ready.retain(|&v| v != vertex); + let mut newly_ready = Vec::new(); + for &dep in &self.dependents[vertex] { + self.remaining_in_degree[dep] -= 1; + if self.remaining_in_degree[dep] == 0 { + self.ready.push(dep); + newly_ready.push(dep); + } + } + + if self.backtrack(step + 1) { + return true; + } + + for &dep in &newly_ready { + self.ready.retain(|&v| v != dep); + } + for &dep in &self.dependents[vertex] { + self.remaining_in_degree[dep] += 1; + } + self.ready.push(vertex); + self.ready.sort_unstable(); + } + + for &dep in &freed { + self.live[dep] = true; + self.live_count += 1; + } + for &dep in &self.dependencies[vertex] { + self.remaining_deps[dep] += 1; + } + + if !was_live { + self.live[vertex] = false; + self.live_count -= 1; + } + } + + false + } } impl Problem for RegisterSufficiency { diff --git a/src/rules/analysis.rs b/src/rules/analysis.rs index 9dac694e..6d616877 100644 --- a/src/rules/analysis.rs +++ b/src/rules/analysis.rs @@ -343,15 +343,21 @@ pub fn compare_overhead( pub fn find_dominated_rules( graph: &ReductionGraph, ) -> (Vec, Vec) { + const MAX_PATHS_PER_EDGE: usize = 1024; + const MAX_INTERMEDIATE_NODES: usize = 6; + let mut dominated = Vec::new(); let mut unknown = Vec::new(); for edge_info in all_edges(graph) { - let paths = graph.find_all_paths( + let paths = 
graph.find_paths_up_to_mode_bounded( edge_info.source_name, &edge_info.source_variant, edge_info.target_name, &edge_info.target_variant, + crate::rules::graph::ReductionMode::Witness, + MAX_PATHS_PER_EDGE, + Some(MAX_INTERMEDIATE_NODES), ); let mut best_dominating: Option<(ReductionPath, ReductionOverhead, Vec)> = None; diff --git a/src/rules/clustering_ilp.rs b/src/rules/clustering_ilp.rs new file mode 100644 index 00000000..659eb0d3 --- /dev/null +++ b/src/rules/clustering_ilp.rs @@ -0,0 +1,127 @@ +//! Reduction from Clustering to ILP (Integer Linear Programming). +//! +//! Use one binary assignment variable `x_{i,c}` for each element `i` and +//! cluster `c`. Equality constraints force every element into exactly one +//! cluster, and conflict constraints forbid any pair with distance above the +//! diameter bound from sharing a cluster. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::Clustering; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing Clustering to ILP. 
+#[derive(Debug, Clone)] +pub struct ReductionClusteringToILP { + target: ILP, + num_elements: usize, + num_clusters: usize, +} + +impl ReductionClusteringToILP { + fn var_index(&self, element: usize, cluster: usize) -> usize { + element * self.num_clusters + cluster + } +} + +impl ReductionResult for ReductionClusteringToILP { + type Source = Clustering; + type Target = ILP; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + (0..self.num_elements) + .map(|element| { + (0..self.num_clusters) + .find(|&cluster| { + let idx = self.var_index(element, cluster); + idx < target_solution.len() && target_solution[idx] == 1 + }) + .unwrap_or(0) + }) + .collect() + } +} + +#[reduction( + overhead = { + num_vars = "num_elements * num_clusters", + num_constraints = "num_elements + num_elements * (num_elements - 1) / 2 * num_clusters", + } +)] +impl ReduceTo> for Clustering { + type Result = ReductionClusteringToILP; + + fn reduce_to(&self) -> Self::Result { + let num_elements = self.num_elements(); + let num_clusters = self.num_clusters(); + let num_vars = num_elements * num_clusters; + let mut constraints = Vec::new(); + + let var_index = + |element: usize, cluster: usize| -> usize { element * num_clusters + cluster }; + + for element in 0..num_elements { + let terms: Vec<(usize, f64)> = (0..num_clusters) + .map(|cluster| (var_index(element, cluster), 1.0)) + .collect(); + constraints.push(LinearConstraint::eq(terms, 1.0)); + } + + let distances = self.distances(); + let diameter_bound = self.diameter_bound(); + for (i, row) in distances.iter().enumerate() { + for (j, &distance) in row.iter().enumerate().skip(i + 1) { + if distance > diameter_bound { + for cluster in 0..num_clusters { + constraints.push(LinearConstraint::le( + vec![(var_index(i, cluster), 1.0), (var_index(j, cluster), 1.0)], + 1.0, + )); + } + } + } + } + + ReductionClusteringToILP { + target: ILP::new(num_vars, constraints, 
vec![], ObjectiveSense::Minimize), + num_elements, + num_clusters, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "clustering_to_ilp", + build: || { + let source = Clustering::new( + vec![ + vec![0, 1, 3, 3], + vec![1, 0, 3, 3], + vec![3, 3, 0, 1], + vec![3, 3, 1, 0], + ], + 2, + 1, + ); + crate::example_db::specs::rule_example_with_witness::<_, ILP>( + source, + SolutionPair { + source_config: vec![0, 0, 1, 1], + target_config: vec![1, 0, 1, 0, 0, 1, 0, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/clustering_ilp.rs"] +mod tests; diff --git a/src/rules/decisionminimumdominatingset_minimumsummulticenter.rs b/src/rules/decisionminimumdominatingset_minimumsummulticenter.rs new file mode 100644 index 00000000..807b03c3 --- /dev/null +++ b/src/rules/decisionminimumdominatingset_minimumsummulticenter.rs @@ -0,0 +1,82 @@ +//! Reduction from Decision Minimum Dominating Set to Minimum Sum Multicenter. +//! +//! On unit-weight, unit-length graphs, choosing `K` centers with total distance +//! `n - K` is exactly choosing a dominating set of size at most `K`. + +use crate::models::decision::Decision; +use crate::models::graph::{MinimumDominatingSet, MinimumSumMulticenter}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; +use crate::types::One; + +/// Result of reducing DecisionMinimumDominatingSet to MinimumSumMulticenter. 
+#[derive(Debug, Clone)] +pub struct ReductionDecisionMinimumDominatingSetToMinimumSumMulticenter { + target: MinimumSumMulticenter, +} + +impl ReductionResult for ReductionDecisionMinimumDominatingSetToMinimumSumMulticenter { + type Source = Decision>; + type Target = MinimumSumMulticenter; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction(overhead = { num_vertices = "num_vertices", num_edges = "num_edges" })] +impl ReduceTo> + for Decision> +{ + type Result = ReductionDecisionMinimumDominatingSetToMinimumSumMulticenter; + + fn reduce_to(&self) -> Self::Result { + let source_graph = self.inner().graph(); + let target = MinimumSumMulticenter::new( + SimpleGraph::new(source_graph.num_vertices(), source_graph.edges()), + vec![1i32; source_graph.num_vertices()], + vec![1i32; source_graph.num_edges()], + self.k(), + ); + ReductionDecisionMinimumDominatingSetToMinimumSumMulticenter { target } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "decisionminimumdominatingset_to_minimumsummulticenter", + build: || { + crate::example_db::specs::rule_example_with_witness::< + _, + MinimumSumMulticenter, + >( + Decision::new( + MinimumDominatingSet::new( + SimpleGraph::new( + 6, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (3, 5), (4, 5)], + ), + vec![One; 6], + ), + 2, + ), + SolutionPair { + source_config: vec![1, 0, 0, 1, 0, 0], + target_config: vec![1, 0, 0, 1, 0, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/decisionminimumdominatingset_minimumsummulticenter.rs"] +mod tests; diff --git a/src/rules/decisionminimumdominatingset_minmaxmulticenter.rs b/src/rules/decisionminimumdominatingset_minmaxmulticenter.rs new file mode 100644 index 00000000..26af00e8 --- /dev/null +++ 
b/src/rules/decisionminimumdominatingset_minmaxmulticenter.rs @@ -0,0 +1,87 @@ +//! Reduction from Decision Minimum Dominating Set to Min-Max Multicenter. +//! +//! On unit-weight, unit-length graphs, choosing `K` centers with maximum +//! distance at most `1` is exactly choosing a dominating set of size `K`. + +use crate::models::decision::Decision; +use crate::models::graph::{MinMaxMulticenter, MinimumDominatingSet}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; +use crate::types::One; + +/// Result of reducing DecisionMinimumDominatingSet to MinMaxMulticenter. +#[derive(Debug, Clone)] +pub struct ReductionDecisionMinimumDominatingSetToMinMaxMulticenter { + target: MinMaxMulticenter, +} + +impl ReductionResult for ReductionDecisionMinimumDominatingSetToMinMaxMulticenter { + type Source = Decision>; + type Target = MinMaxMulticenter; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction( + overhead = { + num_vertices = "num_vertices", + num_edges = "num_edges", + } +)] +impl ReduceTo> + for Decision> +{ + type Result = ReductionDecisionMinimumDominatingSetToMinMaxMulticenter; + + fn reduce_to(&self) -> Self::Result { + let source_graph = self.inner().graph(); + let target = MinMaxMulticenter::new( + SimpleGraph::new(source_graph.num_vertices(), source_graph.edges()), + vec![One; source_graph.num_vertices()], + vec![One; source_graph.num_edges()], + self.k(), + ); + ReductionDecisionMinimumDominatingSetToMinMaxMulticenter { target } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "decisionminimumdominatingset_to_minmaxmulticenter", + build: || { + crate::example_db::specs::rule_example_with_witness::< + _, + 
MinMaxMulticenter, + >( + Decision::new( + MinimumDominatingSet::new( + SimpleGraph::new( + 6, + vec![(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (3, 5), (4, 5)], + ), + vec![One; 6], + ), + 2, + ), + SolutionPair { + source_config: vec![1, 0, 0, 1, 0, 0], + target_config: vec![1, 0, 0, 1, 0, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/decisionminimumdominatingset_minmaxmulticenter.rs"] +mod tests; diff --git a/src/rules/decisionminimumvertexcover_hamiltoniancircuit.rs b/src/rules/decisionminimumvertexcover_hamiltoniancircuit.rs new file mode 100644 index 00000000..963e3f5e --- /dev/null +++ b/src/rules/decisionminimumvertexcover_hamiltoniancircuit.rs @@ -0,0 +1,458 @@ +//! Reduction from Decision Minimum Vertex Cover to Hamiltonian Circuit. +//! +//! This implements the gadget construction from Garey & Johnson, Theorem 3.4, +//! on the unit-weight `Decision>` model. + +use crate::models::decision::Decision; +use crate::models::graph::{HamiltonianCircuit, MinimumVertexCover}; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use std::collections::BTreeSet; + +#[derive(Debug, Clone)] +enum ConstructionKind { + FixedYes { source_cover: Vec }, + FixedNo { num_source_vertices: usize }, + Theorem(TheoremConstruction), +} + +#[derive(Debug, Clone)] +struct TheoremConstruction { + num_source_vertices: usize, + selector_count: usize, + edges: Vec<(usize, usize)>, + incident_edges: Vec>, +} + +impl TheoremConstruction { + fn active_vertices(&self) -> impl Iterator + '_ { + self.incident_edges + .iter() + .enumerate() + .filter(|(_, edges)| !edges.is_empty()) + .map(|(v, _)| v) + } + + fn gadget_base(&self, edge_idx: usize) -> usize { + self.selector_count + 12 * edge_idx + } + + fn gadget_vertex(&self, edge_idx: usize, vertex: usize, position: usize) -> usize { + let (u, v) = self.edges[edge_idx]; + let side = if vertex == u { + 0 + } else if 
vertex == v { + 1 + } else { + panic!( + "vertex {vertex} is not incident on edge {:?}", + self.edges[edge_idx] + ); + }; + + self.gadget_base(edge_idx) + side * 6 + (position - 1) + } + + fn path_endpoints(&self, vertex: usize) -> Option<(usize, usize)> { + let incident = self.incident_edges.get(vertex)?; + let first = *incident.first()?; + let last = *incident.last()?; + Some(( + self.gadget_vertex(first, vertex, 1), + self.gadget_vertex(last, vertex, 6), + )) + } + + fn covers_all_edges(&self, selected: &[usize]) -> bool { + self.edges + .iter() + .all(|&(u, v)| selected.get(u) == Some(&1) || selected.get(v) == Some(&1)) + } + + #[cfg(any(test, feature = "example-db"))] + fn exact_selected_vertices(&self, source_cover: &[usize]) -> Option> { + if source_cover.len() != self.num_source_vertices || !self.covers_all_edges(source_cover) { + return None; + } + + let mut selected: Vec = self + .active_vertices() + .filter(|&v| source_cover[v] == 1) + .collect(); + + if selected.len() > self.selector_count { + return None; + } + + for v in self.active_vertices() { + if selected.len() == self.selector_count { + break; + } + if source_cover[v] == 0 { + selected.push(v); + } + } + + (selected.len() == self.selector_count).then_some(selected) + } + + #[cfg(any(test, feature = "example-db"))] + fn gadget_segment( + &self, + edge_idx: usize, + vertex: usize, + selected_exact: &BTreeSet, + ) -> Vec { + let (u, v) = self.edges[edge_idx]; + let other = if vertex == u { + v + } else if vertex == v { + u + } else { + panic!( + "vertex {vertex} is not incident on edge {:?}", + self.edges[edge_idx] + ); + }; + + if selected_exact.contains(&other) { + return (1..=6) + .map(|position| self.gadget_vertex(edge_idx, vertex, position)) + .collect(); + } + + if vertex == u { + vec![ + self.gadget_vertex(edge_idx, u, 1), + self.gadget_vertex(edge_idx, u, 2), + self.gadget_vertex(edge_idx, u, 3), + self.gadget_vertex(edge_idx, v, 1), + self.gadget_vertex(edge_idx, v, 2), + 
self.gadget_vertex(edge_idx, v, 3), + self.gadget_vertex(edge_idx, v, 4), + self.gadget_vertex(edge_idx, v, 5), + self.gadget_vertex(edge_idx, v, 6), + self.gadget_vertex(edge_idx, u, 4), + self.gadget_vertex(edge_idx, u, 5), + self.gadget_vertex(edge_idx, u, 6), + ] + } else { + vec![ + self.gadget_vertex(edge_idx, v, 1), + self.gadget_vertex(edge_idx, v, 2), + self.gadget_vertex(edge_idx, v, 3), + self.gadget_vertex(edge_idx, u, 1), + self.gadget_vertex(edge_idx, u, 2), + self.gadget_vertex(edge_idx, u, 3), + self.gadget_vertex(edge_idx, u, 4), + self.gadget_vertex(edge_idx, u, 5), + self.gadget_vertex(edge_idx, u, 6), + self.gadget_vertex(edge_idx, v, 4), + self.gadget_vertex(edge_idx, v, 5), + self.gadget_vertex(edge_idx, v, 6), + ] + } + } + + #[cfg(any(test, feature = "example-db"))] + fn vertex_path(&self, vertex: usize, selected_exact: &BTreeSet) -> Vec { + let mut path = Vec::new(); + for &edge_idx in &self.incident_edges[vertex] { + path.extend(self.gadget_segment(edge_idx, vertex, selected_exact)); + } + path + } + + #[cfg(any(test, feature = "example-db"))] + fn build_target_witness(&self, source_cover: &[usize]) -> Vec { + let Some(selected_vertices) = self.exact_selected_vertices(source_cover) else { + return Vec::new(); + }; + + let selected_exact: BTreeSet = selected_vertices.iter().copied().collect(); + let mut witness = Vec::with_capacity(self.selector_count + 12 * self.edges.len()); + + for (selector, &vertex) in selected_vertices.iter().enumerate() { + witness.push(selector); + witness.extend(self.vertex_path(vertex, &selected_exact)); + } + + witness + } + + fn extract_solution( + &self, + target_problem: &HamiltonianCircuit, + target_solution: &[usize], + ) -> Vec { + let mut source_cover = vec![0; self.num_source_vertices]; + if !target_problem.evaluate(target_solution).0 { + return source_cover; + } + + let mut positions = vec![usize::MAX; target_solution.len()]; + for (idx, &vertex) in target_solution.iter().enumerate() { + if vertex >= 
positions.len() || positions[vertex] != usize::MAX { + return vec![0; self.num_source_vertices]; + } + positions[vertex] = idx; + } + + let len = target_solution.len(); + let touches_selector = |vertex: usize| { + let idx = positions[vertex]; + let prev = target_solution[(idx + len - 1) % len]; + let next = target_solution[(idx + 1) % len]; + prev < self.selector_count || next < self.selector_count + }; + + for vertex in self.active_vertices() { + let Some((start, end)) = self.path_endpoints(vertex) else { + continue; + }; + if touches_selector(start) && touches_selector(end) { + source_cover[vertex] = 1; + } + } + + let selected_count = source_cover.iter().filter(|&&x| x == 1).count(); + if selected_count != self.selector_count || !self.covers_all_edges(&source_cover) { + return vec![0; self.num_source_vertices]; + } + + source_cover + } +} + +/// Result of reducing Decision> to +/// HamiltonianCircuit. +#[derive(Debug, Clone)] +pub struct ReductionDecisionMinimumVertexCoverToHamiltonianCircuit { + target: HamiltonianCircuit, + construction: ConstructionKind, +} + +impl ReductionDecisionMinimumVertexCoverToHamiltonianCircuit { + #[cfg(any(test, feature = "example-db"))] + fn build_target_witness(&self, source_cover: &[usize]) -> Vec { + match &self.construction { + ConstructionKind::FixedYes { .. } => vec![0, 1, 2], + ConstructionKind::FixedNo { .. 
} => Vec::new(), + ConstructionKind::Theorem(construction) => { + construction.build_target_witness(source_cover) + } + } + } +} + +impl ReductionResult for ReductionDecisionMinimumVertexCoverToHamiltonianCircuit { + type Source = Decision>; + type Target = HamiltonianCircuit; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + match &self.construction { + ConstructionKind::FixedYes { source_cover } => { + if self.target.evaluate(target_solution).0 { + source_cover.clone() + } else { + vec![0; source_cover.len()] + } + } + ConstructionKind::FixedNo { + num_source_vertices, + } => vec![0; *num_source_vertices], + ConstructionKind::Theorem(construction) => { + construction.extract_solution(&self.target, target_solution) + } + } + } +} + +fn normalize_edges(edges: Vec<(usize, usize)>) -> Vec<(usize, usize)> { + let mut normalized: Vec<_> = edges + .into_iter() + .map(|(u, v)| if u < v { (u, v) } else { (v, u) }) + .collect(); + normalized.sort_unstable(); + normalized +} + +fn insert_edge(edges: &mut BTreeSet<(usize, usize)>, a: usize, b: usize) { + let edge = if a < b { (a, b) } else { (b, a) }; + edges.insert(edge); +} + +#[reduction( + overhead = { + num_vertices = "12 * num_edges + k", + num_edges = "16 * num_edges - num_vertices + 2 * k * num_vertices", + } +)] +impl ReduceTo> for Decision> { + type Result = ReductionDecisionMinimumVertexCoverToHamiltonianCircuit; + + fn reduce_to(&self) -> Self::Result { + let weights = self.inner().weights(); + assert!( + weights.iter().all(|&weight| weight == 1), + "Garey-Johnson Theorem 3.4 requires unit vertex weights" + ); + + let num_source_vertices = self.inner().graph().num_vertices(); + let raw_bound = *self.bound(); + if raw_bound < 0 { + return ReductionDecisionMinimumVertexCoverToHamiltonianCircuit { + target: HamiltonianCircuit::new(SimpleGraph::path(3)), + construction: ConstructionKind::FixedNo { + num_source_vertices, + }, + }; + 
} + + let k = self.k(); + let edges = normalize_edges(self.inner().graph().edges()); + let mut incident_edges = vec![Vec::new(); num_source_vertices]; + for (edge_idx, &(u, v)) in edges.iter().enumerate() { + incident_edges[u].push(edge_idx); + incident_edges[v].push(edge_idx); + } + + let active_vertices: Vec<_> = incident_edges + .iter() + .enumerate() + .filter(|(_, incident)| !incident.is_empty()) + .map(|(vertex, _)| vertex) + .collect(); + let active_count = active_vertices.len(); + + if active_count == 0 || k >= active_count { + let mut source_cover = vec![0; num_source_vertices]; + for vertex in active_vertices { + source_cover[vertex] = 1; + } + return ReductionDecisionMinimumVertexCoverToHamiltonianCircuit { + target: HamiltonianCircuit::new(SimpleGraph::cycle(3)), + construction: ConstructionKind::FixedYes { source_cover }, + }; + } + + if k == 0 { + return ReductionDecisionMinimumVertexCoverToHamiltonianCircuit { + target: HamiltonianCircuit::new(SimpleGraph::path(3)), + construction: ConstructionKind::FixedNo { + num_source_vertices, + }, + }; + } + + let construction = TheoremConstruction { + num_source_vertices, + selector_count: k, + edges, + incident_edges, + }; + + let mut target_edges = BTreeSet::new(); + for (edge_idx, &(u, v)) in construction.edges.iter().enumerate() { + for position in 1..6 { + insert_edge( + &mut target_edges, + construction.gadget_vertex(edge_idx, u, position), + construction.gadget_vertex(edge_idx, u, position + 1), + ); + insert_edge( + &mut target_edges, + construction.gadget_vertex(edge_idx, v, position), + construction.gadget_vertex(edge_idx, v, position + 1), + ); + } + + insert_edge( + &mut target_edges, + construction.gadget_vertex(edge_idx, u, 3), + construction.gadget_vertex(edge_idx, v, 1), + ); + insert_edge( + &mut target_edges, + construction.gadget_vertex(edge_idx, v, 3), + construction.gadget_vertex(edge_idx, u, 1), + ); + insert_edge( + &mut target_edges, + construction.gadget_vertex(edge_idx, u, 6), + 
construction.gadget_vertex(edge_idx, v, 4), + ); + insert_edge( + &mut target_edges, + construction.gadget_vertex(edge_idx, v, 6), + construction.gadget_vertex(edge_idx, u, 4), + ); + } + + for vertex in construction.active_vertices() { + let incident = &construction.incident_edges[vertex]; + for window in incident.windows(2) { + insert_edge( + &mut target_edges, + construction.gadget_vertex(window[0], vertex, 6), + construction.gadget_vertex(window[1], vertex, 1), + ); + } + + let (start, end) = construction + .path_endpoints(vertex) + .expect("active vertices have path endpoints"); + for selector in 0..construction.selector_count { + insert_edge(&mut target_edges, selector, start); + insert_edge(&mut target_edges, selector, end); + } + } + + let target = HamiltonianCircuit::new(SimpleGraph::new( + construction.selector_count + 12 * construction.edges.len(), + target_edges.into_iter().collect(), + )); + + ReductionDecisionMinimumVertexCoverToHamiltonianCircuit { + target, + construction: ConstructionKind::Theorem(construction), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::example_db::specs::assemble_rule_example; + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "decisionminimumvertexcover_to_hamiltoniancircuit", + build: || { + let source = Decision::new( + MinimumVertexCover::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![1, 1, 1]), + 1, + ); + let source_config = vec![0, 1, 0]; + let reduction = ReduceTo::>::reduce_to(&source); + let target_config = reduction.build_target_witness(&source_config); + assemble_rule_example( + &source, + reduction.target_problem(), + vec![SolutionPair { + source_config, + target_config, + }], + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/decisionminimumvertexcover_hamiltoniancircuit.rs"] +mod tests; diff --git a/src/rules/exactcoverby3sets_minimumaxiomset.rs 
b/src/rules/exactcoverby3sets_minimumaxiomset.rs new file mode 100644 index 00000000..a09c2ebb --- /dev/null +++ b/src/rules/exactcoverby3sets_minimumaxiomset.rs @@ -0,0 +1,97 @@ +//! Reduction from ExactCoverBy3Sets to MinimumAxiomSet. +//! +//! Universe elements become element-sentences, source subsets become set-sentences, +//! and the target optimum hits q = |U| / 3 exactly when the source has an exact cover. + +use crate::models::misc::MinimumAxiomSet; +use crate::models::set::ExactCoverBy3Sets; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing ExactCoverBy3Sets to MinimumAxiomSet. +#[derive(Debug, Clone)] +pub struct ReductionXC3SToMinimumAxiomSet { + target: MinimumAxiomSet, + source_universe_size: usize, + source_num_subsets: usize, +} + +impl ReductionResult for ReductionXC3SToMinimumAxiomSet { + type Source = ExactCoverBy3Sets; + type Target = MinimumAxiomSet; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + /// Extract the chosen source subsets from the set-sentence coordinates. + /// + /// For YES-instances, every optimal target witness of value q consists only of + /// q set-sentences, which form an exact cover. For NO-instances, the extracted + /// vector may be non-satisfying, which is expected for an `Or -> Min` rule. 
+ fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let set_offset = self.source_universe_size; + (0..self.source_num_subsets) + .map(|j| usize::from(target_solution.get(set_offset + j).copied().unwrap_or(0) > 0)) + .collect() + } +} + +#[reduction(overhead = { + num_sentences = "universe_size + num_subsets", + num_true_sentences = "universe_size + num_subsets", + num_implications = "4 * num_subsets", +})] +impl ReduceTo for ExactCoverBy3Sets { + type Result = ReductionXC3SToMinimumAxiomSet; + + fn reduce_to(&self) -> Self::Result { + let universe_size = self.universe_size(); + let num_subsets = self.num_subsets(); + let num_sentences = universe_size + num_subsets; + + let mut implications = Vec::with_capacity(4 * num_subsets); + for (j, subset) in self.subsets().iter().enumerate() { + let set_sentence = universe_size + j; + for &element in subset { + implications.push((vec![set_sentence], element)); + } + implications.push((subset.to_vec(), set_sentence)); + } + + let target = + MinimumAxiomSet::new(num_sentences, (0..num_sentences).collect(), implications); + + ReductionXC3SToMinimumAxiomSet { + target, + source_universe_size: universe_size, + source_num_subsets: num_subsets, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "exactcoverby3sets_to_minimumaxiomset", + build: || { + let source = ExactCoverBy3Sets::new( + 6, + vec![[0, 1, 2], [0, 3, 4], [2, 4, 5], [1, 3, 5], [0, 2, 4]], + ); + crate::example_db::specs::rule_example_with_witness::<_, MinimumAxiomSet>( + source, + SolutionPair { + source_config: vec![0, 0, 0, 1, 1], + target_config: vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/exactcoverby3sets_minimumaxiomset.rs"] +mod tests; diff --git a/src/rules/exactcoverby3sets_minimumfaultdetectiontestset.rs 
b/src/rules/exactcoverby3sets_minimumfaultdetectiontestset.rs new file mode 100644 index 00000000..427c9d01 --- /dev/null +++ b/src/rules/exactcoverby3sets_minimumfaultdetectiontestset.rs @@ -0,0 +1,88 @@ +//! Reduction from ExactCoverBy3Sets to MinimumFaultDetectionTestSet. +//! +//! The target DAG has one input per source subset, one internal vertex per +//! universe element, and a single shared output. Under the target model's +//! internal-vertex semantics, selecting an input-output pair covers exactly the +//! three internal vertices corresponding to that subset. + +use crate::models::misc::MinimumFaultDetectionTestSet; +use crate::models::set::ExactCoverBy3Sets; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing ExactCoverBy3Sets to MinimumFaultDetectionTestSet. +#[derive(Debug, Clone)] +pub struct ReductionXC3SToMinimumFaultDetectionTestSet { + target: MinimumFaultDetectionTestSet, +} + +impl ReductionResult for ReductionXC3SToMinimumFaultDetectionTestSet { + type Source = ExactCoverBy3Sets; + type Target = MinimumFaultDetectionTestSet; + + fn target_problem(&self) -> &MinimumFaultDetectionTestSet { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction(overhead = { + num_vertices = "num_subsets + universe_size + 1", + num_arcs = "3 * num_subsets + universe_size", + num_inputs = "num_subsets", + num_outputs = "1", +})] +impl ReduceTo for ExactCoverBy3Sets { + type Result = ReductionXC3SToMinimumFaultDetectionTestSet; + + fn reduce_to(&self) -> Self::Result { + let num_inputs = self.num_subsets(); + let element_offset = num_inputs; + let output = element_offset + self.universe_size(); + + let mut arcs = Vec::with_capacity(3 * self.num_subsets() + self.universe_size()); + for (set_idx, subset) in self.subsets().iter().enumerate() { + for &element in subset { + arcs.push((set_idx, element_offset + element)); + } + } + for element 
in 0..self.universe_size() { + arcs.push((element_offset + element, output)); + } + + ReductionXC3SToMinimumFaultDetectionTestSet { + target: MinimumFaultDetectionTestSet::new( + output + 1, + arcs, + (0..num_inputs).collect(), + vec![output], + ), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "exactcoverby3sets_to_minimumfaultdetectiontestset", + build: || { + let source = ExactCoverBy3Sets::new(6, vec![[0, 1, 2], [3, 4, 5], [0, 3, 4]]); + crate::example_db::specs::rule_example_with_witness::<_, MinimumFaultDetectionTestSet>( + source, + SolutionPair { + source_config: vec![1, 1, 0], + target_config: vec![1, 1, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/exactcoverby3sets_minimumfaultdetectiontestset.rs"] +mod tests; diff --git a/src/rules/feasibleregisterassignment_ilp.rs b/src/rules/feasibleregisterassignment_ilp.rs index 409c0218..def32c86 100644 --- a/src/rules/feasibleregisterassignment_ilp.rs +++ b/src/rules/feasibleregisterassignment_ilp.rs @@ -62,8 +62,9 @@ impl ReduceTo> for FeasibleRegisterAssignment { let latest_idx = |vertex: usize| -> usize { n + vertex }; let order_idx = |pair_idx: usize| -> usize { 2 * n + pair_idx }; - let mut constraints = - Vec::with_capacity(3 * num_pair_vars + 3 * n + 2 * self.num_arcs() + 2 * same_register_pairs.len()); + let mut constraints = Vec::with_capacity( + 3 * num_pair_vars + 3 * n + 2 * self.num_arcs() + 2 * same_register_pairs.len(), + ); for vertex in 0..n { constraints.push(LinearConstraint::le( @@ -95,19 +96,11 @@ impl ReduceTo> for FeasibleRegisterAssignment { let order_var = order_idx(pair_idx); constraints.push(LinearConstraint::le(vec![(order_var, 1.0)], 1.0)); constraints.push(LinearConstraint::ge( - vec![ - (time_idx(v), 1.0), - (time_idx(u), -1.0), - (order_var, -big_m), - ], + vec![(time_idx(v), 1.0), (time_idx(u), -1.0), 
(order_var, -big_m)], 1.0 - big_m, )); constraints.push(LinearConstraint::ge( - vec![ - (time_idx(u), 1.0), - (time_idx(v), -1.0), - (order_var, big_m), - ], + vec![(time_idx(u), 1.0), (time_idx(v), -1.0), (order_var, big_m)], 1.0, )); } diff --git a/src/rules/graph.rs b/src/rules/graph.rs index 1fcbc132..ef8a27ff 100644 --- a/src/rules/graph.rs +++ b/src/rules/graph.rs @@ -624,13 +624,14 @@ impl ReductionGraph { target_variant: &BTreeMap, limit: usize, ) -> Vec { - self.find_paths_up_to_mode( + self.find_paths_up_to_mode_bounded( source, source_variant, target, target_variant, ReductionMode::Witness, limit, + None, ) } @@ -644,6 +645,30 @@ impl ReductionGraph { target_variant: &BTreeMap, mode: ReductionMode, limit: usize, + ) -> Vec { + self.find_paths_up_to_mode_bounded( + source, + source_variant, + target, + target_variant, + mode, + limit, + None, + ) + } + + /// Like [`find_paths_up_to_mode`](Self::find_paths_up_to_mode) but also + /// bounds the number of intermediate nodes in each enumerated path. + #[allow(clippy::too_many_arguments)] + pub fn find_paths_up_to_mode_bounded( + &self, + source: &str, + source_variant: &BTreeMap, + target: &str, + target_variant: &BTreeMap, + mode: ReductionMode, + limit: usize, + max_intermediate_nodes: Option, ) -> Vec { let src = match self.lookup_node(source, source_variant) { Some(idx) => idx, @@ -658,7 +683,7 @@ impl ReductionGraph { Vec, _, std::hash::RandomState, - >(&self.graph, src, dst, 0, None) + >(&self.graph, src, dst, 0, max_intermediate_nodes) .take(limit) .collect(); diff --git a/src/rules/kcoloring_clustering.rs b/src/rules/kcoloring_clustering.rs new file mode 100644 index 00000000..3ce0ef69 --- /dev/null +++ b/src/rules/kcoloring_clustering.rs @@ -0,0 +1,85 @@ +//! Reduction from 3-Coloring to Clustering. +//! +//! Adjacent vertices must land in different clusters, so we encode edges with +//! distance 1 and non-edges with distance 0, then ask for 3 clusters with +//! diameter bound 0. 
+ +use crate::models::graph::KColoring; +use crate::models::misc::Clustering; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::{Graph, SimpleGraph}; +use crate::variant::K3; + +/// Result of reducing KColoring (K=3) to Clustering. +#[derive(Debug, Clone)] +pub struct ReductionKColoringToClustering { + target: Clustering, + source_num_vertices: usize, +} + +impl ReductionResult for ReductionKColoringToClustering { + type Source = KColoring; + type Target = Clustering; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + /// Cluster labels are color labels. The empty-graph corner case uses one + /// dummy target element because Clustering forbids empty instances. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.source_num_vertices.min(target_solution.len())].to_vec() + } +} + +fn build_distances(graph: &SimpleGraph) -> Vec> { + let n = graph.num_vertices(); + if n == 0 { + return vec![vec![0]]; + } + + let mut distances = vec![vec![0; n]; n]; + for (u, v) in graph.edges() { + distances[u][v] = 1; + distances[v][u] = 1; + } + distances +} + +#[reduction(overhead = { + num_elements = "num_vertices", +})] +impl ReduceTo for KColoring { + type Result = ReductionKColoringToClustering; + + fn reduce_to(&self) -> Self::Result { + ReductionKColoringToClustering { + target: Clustering::new(build_distances(self.graph()), self.num_colors(), 0), + source_num_vertices: self.graph().num_vertices(), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "kcoloring_to_clustering", + build: || { + let source = KColoring::::new(SimpleGraph::cycle(5)); + crate::example_db::specs::rule_example_with_witness::<_, Clustering>( + source, + SolutionPair { + source_config: vec![0, 1, 0, 1, 2], + target_config: vec![0, 1, 0, 1, 2], + }, + ) + }, + 
}] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/kcoloring_clustering.rs"] +mod tests; diff --git a/src/rules/ksatisfiability_feasibleregisterassignment.rs b/src/rules/ksatisfiability_feasibleregisterassignment.rs index 94edd5ca..80f6d0ad 100644 --- a/src/rules/ksatisfiability_feasibleregisterassignment.rs +++ b/src/rules/ksatisfiability_feasibleregisterassignment.rs @@ -73,7 +73,8 @@ impl ReductionResult for Reduction3SATToFeasibleRegisterAssignment { (0..self.num_vars) .map(|var| { usize::from( - target_solution[s_pos_idx(var)] < target_solution[s_neg_idx(self.num_vars, var)], + target_solution[s_pos_idx(var)] + < target_solution[s_neg_idx(self.num_vars, var)], ) }) .collect() diff --git a/src/rules/ksatisfiability_quadraticdiophantineequations.rs b/src/rules/ksatisfiability_quadraticdiophantineequations.rs new file mode 100644 index 00000000..dc82fed9 --- /dev/null +++ b/src/rules/ksatisfiability_quadraticdiophantineequations.rs @@ -0,0 +1,134 @@ +//! Reduction from KSatisfiability (3-SAT) to Quadratic Diophantine Equations. +//! +//! This reuses the existing Manders-Adleman 3-SAT -> QuadraticCongruences +//! construction, then converts the bounded congruence witness into an equation +//! of the form x^2 + by = c. + +use crate::models::algebraic::{QuadraticCongruences, QuadraticDiophantineEquations}; +use crate::models::formula::KSatisfiability; +use crate::reduction; +use crate::rules::ksatisfiability_quadraticcongruences::Reduction3SATToQuadraticCongruences; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::variant::K3; +use num_bigint::BigUint; +use num_traits::One; + +/// Result of reducing 3-SAT to Quadratic Diophantine Equations. 
+#[derive(Debug, Clone)] +pub struct Reduction3SATToQuadraticDiophantineEquations { + target: QuadraticDiophantineEquations, + congruence_reduction: Reduction3SATToQuadraticCongruences, +} + +impl ReductionResult for Reduction3SATToQuadraticDiophantineEquations { + type Source = KSatisfiability; + type Target = QuadraticDiophantineEquations; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let Some(x) = self.target.decode_witness(target_solution) else { + return self.congruence_reduction.extract_solution(&[]); + }; + + let Some(congruence_config) = self + .congruence_reduction + .target_problem() + .encode_witness(&x) + else { + return self.congruence_reduction.extract_solution(&[]); + }; + + self.congruence_reduction + .extract_solution(&congruence_config) + } +} + +fn no_instance() -> QuadraticDiophantineEquations { + QuadraticDiophantineEquations::new(1u32, 1u32, 1u32) +} + +fn translate_congruence(source: &QuadraticCongruences) -> QuadraticDiophantineEquations { + if source.c() <= &BigUint::one() { + return no_instance(); + } + + let h = source.c().clone() - BigUint::one(); + let h_squared = &h * &h; + if h_squared < *source.a() { + return no_instance(); + } + + let padding = ((&h_squared - source.a()) / source.b()) + BigUint::one(); + let c = source.a() + (source.b() * &padding); + + QuadraticDiophantineEquations::new(BigUint::one(), source.b().clone(), c) +} + +#[reduction(overhead = { + bit_length_a = "1", + bit_length_b = "(num_vars + num_clauses)^2 * log(num_vars + num_clauses + 1)", + bit_length_c = "(num_vars + num_clauses)^2 * log(num_vars + num_clauses + 1)", +})] +impl ReduceTo for KSatisfiability { + type Result = Reduction3SATToQuadraticDiophantineEquations; + + fn reduce_to(&self) -> Self::Result { + let congruence_reduction = ReduceTo::::reduce_to(self); + let target = translate_congruence(congruence_reduction.target_problem()); + + 
Reduction3SATToQuadraticDiophantineEquations { + target, + congruence_reduction, + } + } +} + +#[cfg(any(test, feature = "example-db"))] +fn canonical_source() -> KSatisfiability { + use crate::models::formula::CNFClause; + + KSatisfiability::::new(3, vec![CNFClause::new(vec![1, 2, 3])]) +} + +#[cfg(any(test, feature = "example-db"))] +fn canonical_witness() -> BigUint { + BigUint::parse_bytes( + b"1751451122102119958305507786775835374858648979796949071929887579732578264063983923970828608254544727567945005331103265320267846420581308180536461678218456421163010842022583797942541569366464959069523226763069748653830351684499364645098951736761394790343553460544021210289436100818494593367113721596780252083857888675004881955664228675079663569835052161564690932502575257394108174870151908279593037426404556490332761276593006398441245490978500647642893471046425509487910796951416870024826654351366508266859321005453091128123256128675758429165869380881549388896022325625404673271432251145796159394173120179999131480837018022329857587128653018300402", + 10, + ) + .expect("reference witness must parse") +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::example_db::specs::assemble_rule_example; + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "ksatisfiability_to_quadraticdiophantineequations", + build: || { + let source = canonical_source(); + let reduction = ReduceTo::::reduce_to(&source); + let target_config = reduction + .target_problem() + .encode_witness(&canonical_witness()) + .expect("reference witness must fit QDE encoding"); + + assemble_rule_example( + &source, + reduction.target_problem(), + vec![SolutionPair { + source_config: vec![1, 0, 0], + target_config, + }], + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ksatisfiability_quadraticdiophantineequations.rs"] +mod tests; diff --git a/src/rules/ksatisfiability_registersufficiency.rs 
b/src/rules/ksatisfiability_registersufficiency.rs new file mode 100644 index 00000000..30caf6c2 --- /dev/null +++ b/src/rules/ksatisfiability_registersufficiency.rs @@ -0,0 +1,396 @@ +//! Reduction from KSatisfiability (3-SAT) to RegisterSufficiency. +//! +//! This is Sethi's Reduction I / Theorem 3.11 with the corrected extraction +//! rule from issue #872: +//! - the snapshot is taken immediately after `w[n]` +//! - `x_k = true` iff `x_pos[k]` has been computed by that snapshot +//! - at most one of `x_pos[k]`, `x_neg[k]` can have been computed by then +//! - the literal/clause edges keep Sethi's original orientation + +use crate::models::formula::KSatisfiability; +use crate::models::misc::RegisterSufficiency; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::variant::K3; + +#[cfg_attr(not(any(test, feature = "example-db")), allow(dead_code))] +#[derive(Debug, Clone)] +struct SethiRegisterLayout { + num_vars: usize, + num_clauses: usize, + b_padding: usize, + a_start: usize, + b_start: usize, + c_start: usize, + f_start: usize, + initial_idx: usize, + d_idx: usize, + final_idx: usize, + r_starts: Vec, + s_starts: Vec, + t_starts: Vec, + u_start: usize, + w_start: usize, + x_pos_start: usize, + x_neg_start: usize, + z_start: usize, + total_vertices: usize, +} + +#[cfg_attr(not(any(test, feature = "example-db")), allow(dead_code))] +impl SethiRegisterLayout { + fn new(num_vars: usize, num_clauses: usize) -> Self { + let b_padding = (2 * num_vars).saturating_sub(num_clauses); + let mut next = 0usize; + + let a_start = next; + next += 2 * num_vars + 1; + + let b_start = next; + next += b_padding; + + let c_start = next; + next += num_clauses; + + let f_start = next; + next += 3 * num_clauses; + + let initial_idx = next; + let d_idx = next + 1; + let final_idx = next + 2; + next += 3; + + let mut r_starts = Vec::with_capacity(num_vars); + for var in 0..num_vars { + r_starts.push(next); + next += 2 * num_vars - 2 * var; + } + + 
let mut s_starts = Vec::with_capacity(num_vars); + for var in 0..num_vars { + s_starts.push(next); + next += 2 * num_vars - 2 * var - 1; + } + + let mut t_starts = Vec::with_capacity(num_vars); + for var in 0..num_vars { + t_starts.push(next); + next += 2 * num_vars - 2 * var - 1; + } + + let u_start = next; + next += 2 * num_vars; + + let w_start = next; + next += num_vars; + + let x_pos_start = next; + next += num_vars; + + let x_neg_start = next; + next += num_vars; + + let z_start = next; + next += num_vars; + + Self { + num_vars, + num_clauses, + b_padding, + a_start, + b_start, + c_start, + f_start, + initial_idx, + d_idx, + final_idx, + r_starts, + s_starts, + t_starts, + u_start, + w_start, + x_pos_start, + x_neg_start, + z_start, + total_vertices: next, + } + } + + fn total_vertices(&self) -> usize { + self.total_vertices + } + + fn bound(&self) -> usize { + 3 * self.num_clauses + 4 * self.num_vars + 1 + self.b_padding + } + + fn initial(&self) -> usize { + self.initial_idx + } + + fn d(&self) -> usize { + self.d_idx + } + + fn final_node(&self) -> usize { + self.final_idx + } + + fn a(&self, index: usize) -> usize { + self.a_start + index + } + + fn bnode(&self, index: usize) -> usize { + self.b_start + index + } + + fn c(&self, clause: usize) -> usize { + self.c_start + clause + } + + fn f(&self, clause: usize, literal_pos: usize) -> usize { + self.f_start + 3 * clause + literal_pos + } + + fn r(&self, var: usize, index: usize) -> usize { + self.r_starts[var] + index + } + + fn s(&self, var: usize, index: usize) -> usize { + self.s_starts[var] + index + } + + fn t(&self, var: usize, index: usize) -> usize { + self.t_starts[var] + index + } + + fn u(&self, var: usize, slot: usize) -> usize { + self.u_start + 2 * var + slot + } + + fn w(&self, var: usize) -> usize { + self.w_start + var + } + + fn x_pos(&self, var: usize) -> usize { + self.x_pos_start + var + } + + fn x_neg(&self, var: usize) -> usize { + self.x_neg_start + var + } + + fn z(&self, var: 
usize) -> usize { + self.z_start + var + } +} + +#[derive(Debug, Clone)] +pub struct Reduction3SATToRegisterSufficiency { + target: RegisterSufficiency, + layout: SethiRegisterLayout, +} + +impl ReductionResult for Reduction3SATToRegisterSufficiency { + type Source = KSatisfiability; + type Target = RegisterSufficiency; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + if self.layout.num_vars == 0 { + return Vec::new(); + } + + let cutoff = target_solution[self.layout.w(self.layout.num_vars - 1)]; + (0..self.layout.num_vars) + .map(|var| { + let x_pos_before = target_solution[self.layout.x_pos(var)] < cutoff; + let x_neg_before = target_solution[self.layout.x_neg(var)] < cutoff; + debug_assert!( + !(x_pos_before && x_neg_before), + "Sethi extraction expects at most one of x_pos/x_neg before w[n]", + ); + usize::from(x_pos_before) + }) + .collect() + } +} + +#[reduction(overhead = { + num_vertices = "3 * num_vars^2 + 9 * num_vars + 4 * num_clauses + register_sufficiency_padding + 4", + num_arcs = "6 * num_vars^2 + 19 * num_vars + 16 * num_clauses + 2 * register_sufficiency_padding + 1", + bound = "3 * num_clauses + 4 * num_vars + 1 + register_sufficiency_padding", +})] +impl ReduceTo for KSatisfiability { + type Result = Reduction3SATToRegisterSufficiency; + + fn reduce_to(&self) -> Self::Result { + let layout = SethiRegisterLayout::new(self.num_vars(), self.num_clauses()); + let mut arcs = Vec::with_capacity( + 6 * self.num_vars() * self.num_vars() + + 19 * self.num_vars() + + 16 * self.num_clauses() + + 2 * layout.b_padding + + 1, + ); + + for index in 0..(2 * self.num_vars() + 1) { + arcs.push((layout.initial(), layout.a(index))); + } + for index in 0..layout.b_padding { + arcs.push((layout.initial(), layout.bnode(index))); + } + for clause in 0..self.num_clauses() { + for literal_pos in 0..3 { + arcs.push((layout.initial(), layout.f(clause, literal_pos))); + } + } + for var in 
0..self.num_vars() { + arcs.push((layout.initial(), layout.u(var, 0))); + arcs.push((layout.initial(), layout.u(var, 1))); + } + + for clause in 0..self.num_clauses() { + arcs.push((layout.c(clause), layout.initial())); + } + for var in 0..self.num_vars() { + for index in 0..(2 * self.num_vars() - 2 * var) { + arcs.push((layout.r(var, index), layout.initial())); + } + for index in 0..(2 * self.num_vars() - 2 * var - 1) { + arcs.push((layout.s(var, index), layout.initial())); + arcs.push((layout.t(var, index), layout.initial())); + } + arcs.push((layout.w(var), layout.initial())); + } + + for var in 0..self.num_vars() { + arcs.push((layout.final_node(), layout.w(var))); + arcs.push((layout.final_node(), layout.x_pos(var))); + arcs.push((layout.final_node(), layout.x_neg(var))); + arcs.push((layout.final_node(), layout.z(var))); + } + arcs.push((layout.final_node(), layout.initial())); + arcs.push((layout.final_node(), layout.d())); + + for var in 0..self.num_vars() { + arcs.push((layout.x_pos(var), layout.z(var))); + arcs.push((layout.x_neg(var), layout.z(var))); + arcs.push((layout.x_pos(var), layout.u(var, 0))); + arcs.push((layout.x_neg(var), layout.u(var, 1))); + } + + for var in 0..self.num_vars() { + arcs.push((layout.w(var), layout.u(var, 0))); + arcs.push((layout.w(var), layout.u(var, 1))); + } + + for var in 0..self.num_vars() { + for index in 0..(2 * self.num_vars() - 2 * var - 1) { + arcs.push((layout.x_pos(var), layout.s(var, index))); + arcs.push((layout.x_neg(var), layout.t(var, index))); + } + for index in 0..(2 * self.num_vars() - 2 * var) { + arcs.push((layout.z(var), layout.r(var, index))); + } + } + + for var in 1..self.num_vars() { + arcs.push((layout.z(var), layout.w(var - 1))); + arcs.push((layout.z(var), layout.z(var - 1))); + } + if self.num_vars() > 0 { + let last_var = self.num_vars() - 1; + for clause in 0..self.num_clauses() { + arcs.push((layout.c(clause), layout.w(last_var))); + arcs.push((layout.c(clause), layout.z(last_var))); + } + } 
+ + for clause in 0..self.num_clauses() { + for literal_pos in 0..3 { + arcs.push((layout.c(clause), layout.f(clause, literal_pos))); + } + } + + for index in 0..layout.b_padding { + arcs.push((layout.d(), layout.bnode(index))); + } + for clause in 0..self.num_clauses() { + arcs.push((layout.d(), layout.c(clause))); + } + + for (clause_idx, clause) in self.clauses().iter().enumerate() { + let mut lit_nodes = [0usize; 3]; + let mut neg_nodes = [0usize; 3]; + + for (literal_pos, &literal) in clause.literals.iter().enumerate() { + let var = literal.unsigned_abs() as usize - 1; + if literal > 0 { + lit_nodes[literal_pos] = layout.x_pos(var); + neg_nodes[literal_pos] = layout.x_neg(var); + } else { + lit_nodes[literal_pos] = layout.x_neg(var); + neg_nodes[literal_pos] = layout.x_pos(var); + } + arcs.push((lit_nodes[literal_pos], layout.f(clause_idx, literal_pos))); + } + + for (earlier, &neg_node) in neg_nodes.iter().enumerate() { + for later in (earlier + 1)..3 { + arcs.push((neg_node, layout.f(clause_idx, later))); + } + } + } + + Reduction3SATToRegisterSufficiency { + target: RegisterSufficiency::new(layout.total_vertices(), arcs, layout.bound()), + layout, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::models::formula::CNFClause; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "ksatisfiability_to_registersufficiency", + build: || { + let source = KSatisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, -2, 3]), + CNFClause::new(vec![-1, 2, -3]), + ], + ); + let to_registers = + as ReduceTo>::reduce_to(&source); + + // Use the B&B solver on the RS instance directly, avoiding the + // expensive RS→ILP chain (17K vars, minutes on CI). 
+ let target_config = to_registers + .target_problem() + .solve_exact() + .expect("satisfying 3-SAT instance must yield a feasible RS witness"); + let source_config = to_registers.extract_solution(&target_config); + + crate::example_db::specs::assemble_rule_example( + &source, + to_registers.target_problem(), + vec![SolutionPair { + source_config, + target_config, + }], + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/ksatisfiability_registersufficiency.rs"] +mod tests; diff --git a/src/rules/minimumfaultdetectiontestset_ilp.rs b/src/rules/minimumfaultdetectiontestset_ilp.rs new file mode 100644 index 00000000..3e84ff1f --- /dev/null +++ b/src/rules/minimumfaultdetectiontestset_ilp.rs @@ -0,0 +1,141 @@ +//! Reduction from MinimumFaultDetectionTestSet to ILP. +//! +//! Each input-output pair becomes a binary decision variable. For every +//! internal vertex, the ILP requires at least one selected pair whose coverage +//! set contains that vertex. Minimizing the sum of the pair variables therefore +//! recovers the minimum-size covering test set. + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::MinimumFaultDetectionTestSet; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use std::collections::VecDeque; + +/// Result of reducing MinimumFaultDetectionTestSet to ILP. 
+#[derive(Debug, Clone)] +pub struct ReductionMFDTSToILP { + target: ILP, +} + +impl ReductionResult for ReductionMFDTSToILP { + type Source = MinimumFaultDetectionTestSet; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution.to_vec() + } +} + +#[reduction(overhead = { + num_vars = "num_inputs * num_outputs", + num_constraints = "num_vertices - num_inputs - num_outputs", +})] +impl ReduceTo> for MinimumFaultDetectionTestSet { + type Result = ReductionMFDTSToILP; + + fn reduce_to(&self) -> Self::Result { + fn reachable(adj: &[Vec], start: usize) -> Vec { + let mut seen = vec![false; adj.len()]; + let mut queue = VecDeque::new(); + seen[start] = true; + queue.push_back(start); + + while let Some(vertex) = queue.pop_front() { + for &next in &adj[vertex] { + if !seen[next] { + seen[next] = true; + queue.push_back(next); + } + } + } + + seen + } + + let mut adj = vec![Vec::new(); self.num_vertices()]; + let mut rev_adj = vec![Vec::new(); self.num_vertices()]; + for &(tail, head) in self.arcs() { + adj[tail].push(head); + rev_adj[head].push(tail); + } + + let input_reachability: Vec> = self + .inputs() + .iter() + .map(|&input| reachable(&adj, input)) + .collect(); + let output_reachability: Vec> = self + .outputs() + .iter() + .map(|&output| reachable(&rev_adj, output)) + .collect(); + + let mut boundary = vec![false; self.num_vertices()]; + for &input in self.inputs() { + boundary[input] = true; + } + for &output in self.outputs() { + boundary[output] = true; + } + + let num_pairs = self.num_inputs() * self.num_outputs(); + let internal_vertices: Vec = (0..self.num_vertices()) + .filter(|&vertex| !boundary[vertex]) + .collect(); + + let constraints: Vec = internal_vertices + .into_iter() + .map(|vertex| { + let mut terms = Vec::new(); + for (input_idx, input_cov) in input_reachability.iter().enumerate() { + for (output_idx, output_cov) in 
output_reachability.iter().enumerate() { + if input_cov[vertex] && output_cov[vertex] { + let pair_idx = input_idx * self.num_outputs() + output_idx; + terms.push((pair_idx, 1.0)); + } + } + } + LinearConstraint::ge(terms, 1.0) + }) + .collect(); + + let objective = (0..num_pairs).map(|pair_idx| (pair_idx, 1.0)).collect(); + + ReductionMFDTSToILP { + target: ILP::new(num_pairs, constraints, objective, ObjectiveSense::Minimize), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumfaultdetectiontestset_to_ilp", + build: || { + let source = MinimumFaultDetectionTestSet::new( + 7, + vec![ + (0, 2), + (0, 3), + (1, 3), + (1, 4), + (2, 5), + (3, 5), + (3, 6), + (4, 6), + ], + vec![0, 1], + vec![5, 6], + ); + crate::example_db::specs::rule_example_via_ilp::<_, bool>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumfaultdetectiontestset_ilp.rs"] +mod tests; diff --git a/src/rules/minimumfeedbackarcset_maximumlikelihoodranking.rs b/src/rules/minimumfeedbackarcset_maximumlikelihoodranking.rs new file mode 100644 index 00000000..3b07146a --- /dev/null +++ b/src/rules/minimumfeedbackarcset_maximumlikelihoodranking.rs @@ -0,0 +1,114 @@ +//! Reduction from MinimumFeedbackArcSet to MaximumLikelihoodRanking. +//! +//! On unit-weight instances, a ranking induces exactly the feedback arc set of +//! backward arcs. The target matrix uses the skew-symmetric `c = 0` encoding: +//! one-way arcs become `+/-1`, while bidirectional pairs and missing pairs map +//! to `0`. 
+ +use crate::models::graph::MinimumFeedbackArcSet; +use crate::models::misc::MaximumLikelihoodRanking; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[allow(clippy::needless_range_loop)] +fn build_skew_symmetric_matrix(problem: &MinimumFeedbackArcSet) -> Vec> { + let n = problem.num_vertices(); + let graph = problem.graph(); + let mut matrix = vec![vec![0i32; n]; n]; + + for i in 0..n { + for j in (i + 1)..n { + let ij = graph.has_arc(i, j); + let ji = graph.has_arc(j, i); + if ij && !ji { + matrix[i][j] = 1; + matrix[j][i] = -1; + } else if ji && !ij { + matrix[i][j] = -1; + matrix[j][i] = 1; + } + } + } + + matrix +} + +/// Result of reducing MinimumFeedbackArcSet to MaximumLikelihoodRanking. +#[derive(Debug, Clone)] +pub struct ReductionFASToMLR { + target: MaximumLikelihoodRanking, + source_arcs: Vec<(usize, usize)>, +} + +impl ReductionResult for ReductionFASToMLR { + type Source = MinimumFeedbackArcSet; + type Target = MaximumLikelihoodRanking; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + self.source_arcs + .iter() + .map(|&(u, v)| usize::from(target_solution[u] > target_solution[v])) + .collect() + } +} + +#[reduction( + overhead = { + num_items = "num_vertices", + } +)] +impl ReduceTo for MinimumFeedbackArcSet { + type Result = ReductionFASToMLR; + + fn reduce_to(&self) -> Self::Result { + assert!( + self.weights().iter().all(|&weight| weight == 1), + "MinimumFeedbackArcSet -> MaximumLikelihoodRanking requires unit arc weights" + ); + + ReductionFASToMLR { + target: MaximumLikelihoodRanking::new(build_skew_symmetric_matrix(self)), + source_arcs: self.graph().arcs(), + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::solvers::BruteForce; + + vec![crate::example_db::specs::RuleExampleSpec { + id: 
"minimumfeedbackarcset_to_maximumlikelihoodranking", + build: || { + let source = MinimumFeedbackArcSet::new( + crate::topology::DirectedGraph::new( + 5, + vec![(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 2), (0, 4)], + ), + vec![1i32; 7], + ); + let reduction = ReduceTo::::reduce_to(&source); + let target_witness = BruteForce::new() + .find_witness(reduction.target_problem()) + .expect("target should have an optimum"); + let source_witness = reduction.extract_solution(&target_witness); + + crate::example_db::specs::rule_example_with_witness::<_, MaximumLikelihoodRanking>( + source, + SolutionPair { + source_config: source_witness, + target_config: target_witness, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumfeedbackarcset_maximumlikelihoodranking.rs"] +mod tests; diff --git a/src/rules/minimumfeedbackvertexset_minimumcodegenerationunlimitedregisters.rs b/src/rules/minimumfeedbackvertexset_minimumcodegenerationunlimitedregisters.rs new file mode 100644 index 00000000..82f5db3d --- /dev/null +++ b/src/rules/minimumfeedbackvertexset_minimumcodegenerationunlimitedregisters.rs @@ -0,0 +1,188 @@ +//! Reduction from MinimumFeedbackVertexSet to MinimumCodeGenerationUnlimitedRegisters. +//! +//! The Aho-Johnson-Ullman chain gadget construction: for each source vertex x +//! with outgoing edges (x,y₁),...,(x,y_d), create a chain of internal nodes +//! x¹,...,x^d where xⁱ has left child x^{i-1} and right child y_i⁰ (the leaf). +//! Copies in an optimal program correspond exactly to a minimum feedback vertex set. + +use crate::models::graph::MinimumFeedbackVertexSet; +use crate::models::misc::MinimumCodeGenerationUnlimitedRegisters; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +/// Result of reducing MinimumFeedbackVertexSet to MinimumCodeGenerationUnlimitedRegisters. 
+#[derive(Debug, Clone)] +pub struct ReductionFVSToCodeGen { + target: MinimumCodeGenerationUnlimitedRegisters, + /// Number of source vertices (= number of leaves in the target DAG). + num_source_vertices: usize, + /// For each source vertex x, the target index of x¹ (first chain node). + /// `None` if vertex x has out-degree 0 (no chain, leaf only). + chain_start: Vec>, + /// For each leaf x⁰ (source vertex x), the list of internal node target + /// indices that use x⁰ as a right child. + right_child_users: Vec>, +} + +impl ReductionResult for ReductionFVSToCodeGen { + type Source = MinimumFeedbackVertexSet; + type Target = MinimumCodeGenerationUnlimitedRegisters; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + /// Extract a feedback vertex set from a target evaluation-order solution. + /// + /// A leaf register R_x is destroyed when x¹ executes (left operand). + /// If any right-child user of x⁰ is evaluated after x¹, a LOAD was needed, + /// meaning x is in the feedback vertex set. + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.num_source_vertices; + let mut source_config = vec![0usize; n]; + + // target_solution[i] = evaluation position for the i-th internal node + // Internal nodes are indices n, n+1, ..., n+m-1 (sorted), so + // target_solution[j] = position for internal node (n + j). 
+ + // eval_pos[j] = evaluation position for internal node (n + j) + let eval_pos = target_solution; + + for (x, cfg) in source_config.iter_mut().enumerate() { + if let Some(chain_start_idx) = self.chain_start[x] { + let start_j = chain_start_idx - n; + let start_pos = eval_pos[start_j]; + + for &user_idx in &self.right_child_users[x] { + let user_j = user_idx - n; + if eval_pos[user_j] > start_pos { + *cfg = 1; + break; + } + } + } + } + + source_config + } +} + +#[reduction( + overhead = { + num_vertices = "num_vertices + num_arcs", + } +)] +impl ReduceTo for MinimumFeedbackVertexSet { + type Result = ReductionFVSToCodeGen; + + fn reduce_to(&self) -> Self::Result { + let n = self.graph().num_vertices(); + let m = self.graph().num_arcs(); + + // Build outgoing adjacency list + let mut out_neighbors: Vec> = vec![vec![]; n]; + for (u, v) in self.graph().arcs() { + out_neighbors[u].push(v); + } + + // Assign internal node indices: leaves are 0..n, chain nodes are n..n+m + let mut left_arcs = Vec::with_capacity(m); + let mut right_arcs = Vec::with_capacity(m); + let mut chain_start = vec![None; n]; + let mut right_child_users: Vec> = vec![vec![]; n]; + + let mut next_internal = n; // first internal node index + for x in 0..n { + let neighbors = &out_neighbors[x]; + let d = neighbors.len(); + if d == 0 { + continue; + } + + chain_start[x] = Some(next_internal); + + for (i, &neighbor) in neighbors.iter().enumerate() { + let node_idx = next_internal + i; + // Left child: predecessor in chain + let left_child = if i == 0 { + x // leaf x⁰ + } else { + next_internal + i - 1 // previous chain node + }; + // Right child: leaf y_i⁰ + let right_child = neighbor; // leaf index = source vertex index + + left_arcs.push((node_idx, left_child)); + right_arcs.push((node_idx, right_child)); + right_child_users[right_child].push(node_idx); + } + + next_internal += d; + } + + debug_assert_eq!(next_internal, n + m); + + let target = MinimumCodeGenerationUnlimitedRegisters::new(n + m, 
left_arcs, right_arcs); + + ReductionFVSToCodeGen { + target, + num_source_vertices: n, + chain_start, + right_child_users, + } + } +} + +#[cfg(any(test, feature = "example-db"))] +fn issue_example_source() -> MinimumFeedbackVertexSet { + use crate::topology::DirectedGraph; + // 3-cycle: a→b→c→a (vertices 0,1,2) + MinimumFeedbackVertexSet::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2), (2, 0)]), + vec![1i32; 3], + ) +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + use crate::solvers::BruteForce; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumfeedbackvertexset_to_minimumcodegenerationunlimitedregisters", + build: || { + let source = issue_example_source(); + let reduction = ReduceTo::::reduce_to(&source); + + // Find a target witness whose extracted source solution matches an optimal FVS + let solver = BruteForce::new(); + let source_witnesses = solver.find_all_witnesses(&source); + let target_witnesses = solver.find_all_witnesses(reduction.target_problem()); + + let (source_config, target_config) = target_witnesses + .iter() + .find_map(|tw| { + let extracted = reduction.extract_solution(tw); + if source_witnesses.contains(&extracted) { + Some((extracted, tw.clone())) + } else { + None + } + }) + .expect("canonical FVS -> CodeGen example must have matching witness"); + + crate::example_db::specs::assemble_rule_example( + &source, + reduction.target_problem(), + vec![SolutionPair { + source_config, + target_config, + }], + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumfeedbackvertexset_minimumcodegenerationunlimitedregisters.rs"] +mod tests; diff --git a/src/rules/minimumvertexcover_minimumweightandorgraph.rs b/src/rules/minimumvertexcover_minimumweightandorgraph.rs new file mode 100644 index 00000000..feeefdf1 --- /dev/null +++ b/src/rules/minimumvertexcover_minimumweightandorgraph.rs @@ -0,0 +1,115 @@ +//! 
Reduction from MinimumVertexCover to MinimumWeightAndOrGraph. + +use crate::models::graph::MinimumVertexCover; +use crate::models::misc::MinimumWeightAndOrGraph; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; +use crate::topology::Graph; +use crate::topology::SimpleGraph; + +/// Result of reducing MinimumVertexCover to MinimumWeightAndOrGraph. +#[derive(Debug, Clone)] +pub struct ReductionVCToAndOrGraph { + target: MinimumWeightAndOrGraph, + sink_arc_start: usize, + num_source_vertices: usize, +} + +impl ReductionResult for ReductionVCToAndOrGraph { + type Source = MinimumVertexCover; + type Target = MinimumWeightAndOrGraph; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + (0..self.num_source_vertices) + .map(|j| usize::from(target_solution.get(self.sink_arc_start + j) == Some(&1))) + .collect() + } +} + +#[reduction( + overhead = { + num_vertices = "1 + num_edges + 2 * num_vertices", + num_arcs = "3 * num_edges + num_vertices", + } +)] +impl ReduceTo for MinimumVertexCover { + type Result = ReductionVCToAndOrGraph; + + fn reduce_to(&self) -> Self::Result { + let n = self.graph().num_vertices(); + let edges = self.graph().edges(); + let m = edges.len(); + + let num_target_vertices = 1 + m + (2 * n); + let mut gate_types = vec![None; num_target_vertices]; + gate_types[0] = Some(true); + for gate in gate_types.iter_mut().skip(1).take(m + n) { + *gate = Some(false); + } + + let edge_vertex = |i: usize| 1 + i; + let cover_vertex = |j: usize| 1 + m + j; + let sink_vertex = |j: usize| 1 + m + n + j; + + let mut arcs = Vec::with_capacity((3 * m) + n); + let mut arc_weights = Vec::with_capacity((3 * m) + n); + + for i in 0..m { + arcs.push((0, edge_vertex(i))); + arc_weights.push(1); + } + + for (i, &(u, v)) in edges.iter().enumerate() { + arcs.push((edge_vertex(i), cover_vertex(u))); + arc_weights.push(1); + arcs.push((edge_vertex(i), cover_vertex(v))); + 
arc_weights.push(1); + } + + let sink_arc_start = arcs.len(); + for (j, &weight) in self.weights().iter().enumerate() { + arcs.push((cover_vertex(j), sink_vertex(j))); + arc_weights.push(weight); + } + + let target = + MinimumWeightAndOrGraph::new(num_target_vertices, arcs, 0, gate_types, arc_weights); + + ReductionVCToAndOrGraph { + target, + sink_arc_start, + num_source_vertices: n, + } + } +} + +#[cfg(any(test, feature = "example-db"))] +fn issue_example_source() -> MinimumVertexCover { + MinimumVertexCover::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![1i32; 3]) +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + vec![crate::example_db::specs::RuleExampleSpec { + id: "minimumvertexcover_to_minimumweightandorgraph", + build: || { + crate::example_db::specs::rule_example_with_witness::<_, MinimumWeightAndOrGraph>( + issue_example_source(), + SolutionPair { + source_config: vec![0, 1, 0], + target_config: vec![1, 1, 0, 1, 1, 0, 0, 1, 0], + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/minimumvertexcover_minimumweightandorgraph.rs"] +mod tests; diff --git a/src/rules/mod.rs b/src/rules/mod.rs index 38205d0e..caf70070 100644 --- a/src/rules/mod.rs +++ b/src/rules/mod.rs @@ -12,8 +12,13 @@ pub(crate) mod circuit_sat; pub(crate) mod circuit_spinglass; mod closestvectorproblem_qubo; pub(crate) mod coloring_qubo; +pub(crate) mod decisionminimumdominatingset_minimumsummulticenter; +pub(crate) mod decisionminimumdominatingset_minmaxmulticenter; +pub(crate) mod decisionminimumvertexcover_hamiltoniancircuit; pub(crate) mod exactcoverby3sets_algebraicequationsovergf2; pub(crate) mod exactcoverby3sets_maximumsetpacking; +pub(crate) mod exactcoverby3sets_minimumaxiomset; +pub(crate) mod exactcoverby3sets_minimumfaultdetectiontestset; pub(crate) mod exactcoverby3sets_minimumweightsolutiontolinearequations; pub(crate) mod exactcoverby3sets_staffscheduling; pub(crate) mod 
exactcoverby3sets_subsetproduct; @@ -42,6 +47,7 @@ pub(crate) mod kclique_balancedcompletebipartitesubgraph; pub(crate) mod kclique_conjunctivebooleanquery; pub(crate) mod kclique_subgraphisomorphism; mod kcoloring_casts; +pub(crate) mod kcoloring_clustering; pub(crate) mod kcoloring_partitionintocliques; pub(crate) mod kcoloring_twodimensionalconsecutivesets; mod knapsack_qubo; @@ -58,7 +64,9 @@ pub(crate) mod ksatisfiability_monochromatictriangle; pub(crate) mod ksatisfiability_oneinthreesatisfiability; pub(crate) mod ksatisfiability_preemptivescheduling; pub(crate) mod ksatisfiability_quadraticcongruences; +pub(crate) mod ksatisfiability_quadraticdiophantineequations; pub(crate) mod ksatisfiability_qubo; +pub(crate) mod ksatisfiability_registersufficiency; pub(crate) mod ksatisfiability_simultaneousincongruences; pub(crate) mod ksatisfiability_subsetsum; pub(crate) mod ksatisfiability_timetabledesign; @@ -75,6 +83,8 @@ mod maximumindependentset_triangular; pub(crate) mod maximummatching_maximumsetpacking; mod maximumsetpacking_casts; pub(crate) mod maximumsetpacking_qubo; +pub(crate) mod minimumfeedbackarcset_maximumlikelihoodranking; +pub(crate) mod minimumfeedbackvertexset_minimumcodegenerationunlimitedregisters; pub(crate) mod minimummultiwaycut_qubo; pub(crate) mod minimumvertexcover_ensemblecomputation; pub(crate) mod minimumvertexcover_longestcommonsubsequence; @@ -84,6 +94,7 @@ pub(crate) mod minimumvertexcover_minimumfeedbackvertexset; pub(crate) mod minimumvertexcover_minimumhittingset; pub(crate) mod minimumvertexcover_minimummaximalmatching; pub(crate) mod minimumvertexcover_minimumsetcovering; +pub(crate) mod minimumvertexcover_minimumweightandorgraph; pub(crate) mod naesatisfiability_maxcut; pub(crate) mod naesatisfiability_partitionintoperfectmatchings; pub(crate) mod naesatisfiability_setsplitting; @@ -98,6 +109,7 @@ pub(crate) mod partition_sequencingtominimizetardytaskweight; pub(crate) mod partition_sequencingwithinintervals; pub(crate) mod 
partition_shortestweightconstrainedpath; pub(crate) mod partition_subsetsum; +pub(crate) mod partitionintocliques_minimumcoveringbycliques; pub(crate) mod partitionintopathsoflength2_boundedcomponentspanningforest; pub(crate) mod rootedtreearrangement_rootedtreestorageassignment; pub(crate) mod sat_circuitsat; @@ -152,6 +164,8 @@ pub(crate) mod capacityassignment_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod circuit_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod clustering_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod coloring_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod consecutiveblockminimization_ilp; @@ -172,10 +186,10 @@ pub(crate) mod exactcoverby3sets_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod expectedretrievalcost_ilp; #[cfg(feature = "ilp-solver")] -pub(crate) mod feasibleregisterassignment_ilp; -#[cfg(feature = "ilp-solver")] pub(crate) mod factoring_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod feasibleregisterassignment_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod flowshopscheduling_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod graphpartitioning_ilp; @@ -236,6 +250,8 @@ pub(crate) mod minimumedgecostflow_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumexternalmacrodatacompression_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod minimumfaultdetectiontestset_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod minimumfeedbackarcset_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod minimumfeedbackvertexset_ilp; @@ -302,6 +318,8 @@ pub(crate) mod qubo_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod rectilinearpicturecompression_ilp; #[cfg(feature = "ilp-solver")] +pub(crate) mod registersufficiency_ilp; +#[cfg(feature = "ilp-solver")] pub(crate) mod resourceconstrainedscheduling_ilp; #[cfg(feature = "ilp-solver")] pub(crate) mod rootedtreestorageassignment_ilp; @@ -371,10 +389,16 @@ pub(crate) fn canonical_rule_example_specs() -> Vec Vec Vec Vec Vec Vec Vec Vec Vec, +} + +impl OrlinLayout { + 
fn new(graph: &SimpleGraph) -> Self { + let n = graph.num_vertices(); + let directed_pairs = (0..n) + .flat_map(|i| { + (0..n).filter_map(move |j| (i != j && graph.has_edge(i, j)).then_some((i, j))) + }) + .collect(); + Self { + num_source_vertices: n, + directed_pairs, + } + } + + fn num_directed_pairs(&self) -> usize { + self.directed_pairs.len() + } + + fn x(&self, i: usize) -> usize { + i + } + + fn y(&self, i: usize) -> usize { + self.num_source_vertices + i + } + + fn a(&self, directed_pair_index: usize) -> usize { + 2 * self.num_source_vertices + directed_pair_index + } + + fn b(&self, directed_pair_index: usize) -> usize { + 2 * self.num_source_vertices + self.num_directed_pairs() + directed_pair_index + } + + fn z_left(&self) -> usize { + 2 * self.num_source_vertices + 2 * self.num_directed_pairs() + } + + fn z_right(&self) -> usize { + self.z_left() + 1 + } + + fn left_vertices(&self) -> Vec { + let mut vertices = (0..self.num_source_vertices) + .map(|i| self.x(i)) + .collect::>(); + vertices.extend((0..self.num_directed_pairs()).map(|idx| self.a(idx))); + vertices + } + + fn right_vertices(&self) -> Vec { + let mut vertices = (0..self.num_source_vertices) + .map(|i| self.y(i)) + .collect::>(); + vertices.extend((0..self.num_directed_pairs()).map(|idx| self.b(idx))); + vertices + } + + fn total_vertices(&self) -> usize { + self.z_right() + 1 + } +} + +fn add_clique_edges(vertices: &[usize], edges: &mut Vec<(usize, usize)>) { + for i in 0..vertices.len() { + for j in (i + 1)..vertices.len() { + edges.push((vertices[i], vertices[j])); + } + } +} + +fn invalid_source_solution(num_source_vertices: usize, num_source_cliques: usize) -> Vec { + vec![num_source_cliques; num_source_vertices] +} + +/// Result of reducing PartitionIntoCliques to MinimumCoveringByCliques. 
+#[derive(Debug, Clone)] +pub struct ReductionPartitionIntoCliquesToMinimumCoveringByCliques { + target: MinimumCoveringByCliques, + source_graph: SimpleGraph, + source_num_cliques: usize, +} + +impl ReductionResult for ReductionPartitionIntoCliquesToMinimumCoveringByCliques { + type Source = PartitionIntoCliques; + type Target = MinimumCoveringByCliques; + + fn target_problem(&self) -> &Self::Target { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + let n = self.source_graph.num_vertices(); + let target_edges = self.target.graph().edges(); + if target_solution.len() != target_edges.len() { + return invalid_source_solution(n, self.source_num_cliques); + } + + let mut matching_labels = vec![None; n]; + for ((u, v), &label) in target_edges.iter().zip(target_solution.iter()) { + let matching_index = if *u < n && *v == n + *u { + Some(*u) + } else if *v < n && *u == n + *v { + Some(*v) + } else { + None + }; + + if let Some(i) = matching_index { + matching_labels[i] = Some(label); + } + } + + if matching_labels.iter().any(Option::is_none) { + return invalid_source_solution(n, self.source_num_cliques); + } + + let mut label_map = BTreeMap::new(); + let extracted = matching_labels + .into_iter() + .map(|label| { + let label = label.expect("checked above"); + let next = label_map.len(); + *label_map.entry(label).or_insert(next) + }) + .collect::>(); + + if label_map.len() > self.source_num_cliques { + return invalid_source_solution(n, self.source_num_cliques); + } + + let source_problem = + PartitionIntoCliques::new(self.source_graph.clone(), self.source_num_cliques); + if as crate::traits::Problem>::evaluate( + &source_problem, + &extracted, + ) + .0 + { + extracted + } else { + invalid_source_solution(n, self.source_num_cliques) + } + } +} + +#[reduction( + overhead = { + num_vertices = "2 * num_vertices + 4 * num_edges + 2", + num_edges = "(num_vertices + 2 * num_edges)^2 + 2 * num_vertices + 10 * num_edges", + } +)] +impl 
ReduceTo> for PartitionIntoCliques { + type Result = ReductionPartitionIntoCliquesToMinimumCoveringByCliques; + + fn reduce_to(&self) -> Self::Result { + let layout = OrlinLayout::new(self.graph()); + let left_vertices = layout.left_vertices(); + let right_vertices = layout.right_vertices(); + let mut edges = Vec::new(); + + // Step 1-2: L and R are cliques. + add_clique_edges(&left_vertices, &mut edges); + add_clique_edges(&right_vertices, &mut edges); + + // Step 3-4: connect z_L and z_R to their respective sides. + for &u in &left_vertices { + edges.push((layout.z_left(), u)); + } + for &u in &right_vertices { + edges.push((layout.z_right(), u)); + } + + // Step 5: matching edges x_i y_i. + for i in 0..self.num_vertices() { + edges.push((layout.x(i), layout.y(i))); + } + + // Step 6: one 4-vertex gadget for each directed source edge. + for (idx, &(i, j)) in layout.directed_pairs.iter().enumerate() { + edges.push((layout.x(i), layout.y(j))); + edges.push((layout.x(i), layout.b(idx))); + edges.push((layout.a(idx), layout.y(j))); + edges.push((layout.a(idx), layout.b(idx))); + } + + let target_graph = SimpleGraph::new(layout.total_vertices(), edges); + let target = MinimumCoveringByCliques::new(target_graph); + + ReductionPartitionIntoCliquesToMinimumCoveringByCliques { + target, + source_graph: self.graph().clone(), + source_num_cliques: self.num_cliques(), + } + } +} + +#[cfg(any(test, feature = "example-db"))] +fn edge_labels_from_clique_cover(graph: &SimpleGraph, cliques: &[Vec]) -> Vec { + let clique_sets = cliques + .iter() + .map(|clique| { + clique + .iter() + .copied() + .collect::>() + }) + .collect::>(); + + graph + .edges() + .into_iter() + .map(|(u, v)| { + clique_sets + .iter() + .position(|clique| clique.contains(&u) && clique.contains(&v)) + .expect("canonical cover should cover every target edge") + }) + .collect() +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + use crate::export::SolutionPair; + + 
vec![crate::example_db::specs::RuleExampleSpec { + id: "partitionintocliques_to_minimumcoveringbycliques", + build: || { + let source = PartitionIntoCliques::new(SimpleGraph::new(3, vec![(0, 1)]), 2); + let reduction = ReduceTo::>::reduce_to(&source); + let layout = OrlinLayout::new(source.graph()); + + let target_config = edge_labels_from_clique_cover( + reduction.target_problem().graph(), + &[ + vec![layout.x(0), layout.x(1), layout.y(0), layout.y(1)], + vec![layout.x(2), layout.y(2)], + vec![layout.x(0), layout.a(0), layout.b(0), layout.y(1)], + vec![layout.x(1), layout.a(1), layout.b(1), layout.y(0)], + { + let mut clique = layout.left_vertices(); + clique.push(layout.z_left()); + clique + }, + { + let mut clique = layout.right_vertices(); + clique.push(layout.z_right()); + clique + }, + ], + ); + + crate::example_db::specs::rule_example_with_witness::< + _, + MinimumCoveringByCliques, + >( + source, + SolutionPair { + source_config: vec![0, 0, 1], + target_config, + }, + ) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/partitionintocliques_minimumcoveringbycliques.rs"] +mod tests; diff --git a/src/rules/registersufficiency_ilp.rs b/src/rules/registersufficiency_ilp.rs new file mode 100644 index 00000000..0a625ea0 --- /dev/null +++ b/src/rules/registersufficiency_ilp.rs @@ -0,0 +1,192 @@ +//! Reduction from RegisterSufficiency to ILP. +//! +//! The formulation uses: +//! - integer `t_v` variables for evaluation positions +//! - integer `l_v` variables for latest-use positions +//! - binary pair-order selectors to force a permutation of `0..n-1` +//! - binary threshold/live indicators to count how many values are live after +//! 
each evaluation step + +use crate::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; +use crate::models::misc::RegisterSufficiency; +use crate::reduction; +use crate::rules::traits::{ReduceTo, ReductionResult}; + +#[derive(Debug, Clone)] +pub struct ReductionRegisterSufficiencyToILP { + target: ILP, + num_vertices: usize, +} + +impl ReductionResult for ReductionRegisterSufficiencyToILP { + type Source = RegisterSufficiency; + type Target = ILP; + + fn target_problem(&self) -> &ILP { + &self.target + } + + fn extract_solution(&self, target_solution: &[usize]) -> Vec { + target_solution[..self.num_vertices].to_vec() + } +} + +#[reduction(overhead = { + num_vars = "3 * num_vertices^2 + num_vertices * (num_vertices - 1) / 2 + 2 * num_vertices", + num_constraints = "9 * num_vertices^2 + 3 * num_vertices * (num_vertices - 1) / 2 + 3 * num_vertices + 2 * num_arcs + num_sinks", +})] +impl ReduceTo> for RegisterSufficiency { + type Result = ReductionRegisterSufficiencyToILP; + + fn reduce_to(&self) -> Self::Result { + let n = self.num_vertices(); + let pair_list: Vec<(usize, usize)> = (0..n) + .flat_map(|u| ((u + 1)..n).map(move |v| (u, v))) + .collect(); + let num_pair_vars = pair_list.len(); + + let time_offset = 0; + let latest_offset = n; + let order_offset = 2 * n; + let before_offset = order_offset + num_pair_vars; + let after_offset = before_offset + n * n; + let live_offset = after_offset + n * n; + let num_vars = live_offset + n * n; + + let time_idx = |vertex: usize| -> usize { time_offset + vertex }; + let latest_idx = |vertex: usize| -> usize { latest_offset + vertex }; + let order_idx = |pair_idx: usize| -> usize { order_offset + pair_idx }; + let before_idx = + |vertex: usize, step: usize| -> usize { before_offset + vertex * n + step }; + let after_idx = |vertex: usize, step: usize| -> usize { after_offset + vertex * n + step }; + let live_idx = |vertex: usize, step: usize| -> usize { live_offset + vertex * n + step }; + + let big_m = n as f64; + let 
mut has_dependent = vec![false; n]; + let mut constraints = Vec::new(); + + for vertex in 0..n { + constraints.push(LinearConstraint::le( + vec![(time_idx(vertex), 1.0)], + (n.saturating_sub(1)) as f64, + )); + constraints.push(LinearConstraint::le( + vec![(latest_idx(vertex), 1.0)], + n as f64, + )); + } + + for (pair_idx, &(u, v)) in pair_list.iter().enumerate() { + let order_var = order_idx(pair_idx); + constraints.push(LinearConstraint::le(vec![(order_var, 1.0)], 1.0)); + constraints.push(LinearConstraint::ge( + vec![(time_idx(v), 1.0), (time_idx(u), -1.0), (order_var, -big_m)], + 1.0 - big_m, + )); + constraints.push(LinearConstraint::ge( + vec![(time_idx(u), 1.0), (time_idx(v), -1.0), (order_var, big_m)], + 1.0, + )); + } + + for &(dependent, dependency) in self.arcs() { + has_dependent[dependency] = true; + constraints.push(LinearConstraint::ge( + vec![(time_idx(dependent), 1.0), (time_idx(dependency), -1.0)], + 1.0, + )); + constraints.push(LinearConstraint::ge( + vec![(latest_idx(dependency), 1.0), (time_idx(dependent), -1.0)], + 0.0, + )); + } + + for (vertex, &has_child) in has_dependent.iter().enumerate() { + if !has_child { + constraints.push(LinearConstraint::eq( + vec![(latest_idx(vertex), 1.0)], + n as f64, + )); + } + } + + for vertex in 0..n { + for step in 0..n { + let before_var = before_idx(vertex, step); + constraints.push(LinearConstraint::le(vec![(before_var, 1.0)], 1.0)); + constraints.push(LinearConstraint::le( + vec![(time_idx(vertex), 1.0), (before_var, big_m)], + step as f64 + big_m, + )); + constraints.push(LinearConstraint::ge( + vec![(time_idx(vertex), 1.0), (before_var, big_m)], + (step + 1) as f64, + )); + + let after_var = after_idx(vertex, step); + constraints.push(LinearConstraint::le(vec![(after_var, 1.0)], 1.0)); + constraints.push(LinearConstraint::ge( + vec![(latest_idx(vertex), 1.0), (after_var, -big_m)], + (step + 1) as f64 - big_m, + )); + constraints.push(LinearConstraint::le( + vec![(latest_idx(vertex), 1.0), 
(after_var, -big_m)], + step as f64, + )); + + let live_var = live_idx(vertex, step); + constraints.push(LinearConstraint::le( + vec![(live_var, 1.0), (before_var, -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::le( + vec![(live_var, 1.0), (after_var, -1.0)], + 0.0, + )); + constraints.push(LinearConstraint::ge( + vec![(live_var, 1.0), (before_var, -1.0), (after_var, -1.0)], + -1.0, + )); + } + } + + for step in 0..n { + let live_terms: Vec<(usize, f64)> = + (0..n).map(|vertex| (live_idx(vertex, step), 1.0)).collect(); + constraints.push(LinearConstraint::le(live_terms, self.bound() as f64)); + } + + ReductionRegisterSufficiencyToILP { + target: ILP::new(num_vars, constraints, vec![], ObjectiveSense::Minimize), + num_vertices: n, + } + } +} + +#[cfg(feature = "example-db")] +pub(crate) fn canonical_rule_example_specs() -> Vec { + vec![crate::example_db::specs::RuleExampleSpec { + id: "registersufficiency_to_ilp", + build: || { + let source = RegisterSufficiency::new( + 7, + vec![ + (2, 0), + (2, 1), + (3, 1), + (4, 2), + (4, 3), + (5, 0), + (6, 4), + (6, 5), + ], + 3, + ); + crate::example_db::specs::rule_example_via_ilp::<_, i32>(source) + }, + }] +} + +#[cfg(test)] +#[path = "../unit_tests/rules/registersufficiency_ilp.rs"] +mod tests; diff --git a/src/unit_tests/example_db.rs b/src/unit_tests/example_db.rs index dcd40932..536f999a 100644 --- a/src/unit_tests/example_db.rs +++ b/src/unit_tests/example_db.rs @@ -517,20 +517,17 @@ fn model_specs_are_optimal() { for spec in specs { let name = spec.instance.problem_name(); let variant = spec.instance.variant_map(); - // Try ILP (direct or via reduction), fall back to brute force for small instances - let best_config = ilp_solver - .solve_via_reduction(name, &variant, spec.instance.as_any()) - .or_else(|| { - // Only brute-force if search space is small (≤ 2^20 configs) - let dims = spec.instance.dims_dyn(); - let log_space: f64 = dims.iter().map(|&d| (d as f64).log2()).sum(); - if log_space > 20.0 { - 
return None; - } - let entry = find_variant_entry(name, &variant)?; - let (config, _) = (entry.solve_witness_fn)(spec.instance.as_any())?; - Some(config) - }); + // Try brute force first for small instances (fast, avoids expensive ILP chains) + let dims = spec.instance.dims_dyn(); + let log_space: f64 = dims.iter().map(|&d| (d as f64).log2()).sum(); + let best_config = if log_space <= 20.0 { + find_variant_entry(name, &variant) + .and_then(|entry| (entry.solve_witness_fn)(spec.instance.as_any())) + .map(|(config, _)| config) + .or_else(|| ilp_solver.solve_via_reduction(name, &variant, spec.instance.as_any())) + } else { + ilp_solver.solve_via_reduction(name, &variant, spec.instance.as_any()) + }; if let Some(best_config) = best_config { let best_value = spec.instance.evaluate_json(&best_config); diff --git a/src/unit_tests/models/algebraic/quadratic_diophantine_equations.rs b/src/unit_tests/models/algebraic/quadratic_diophantine_equations.rs index 8f727c75..055539e5 100644 --- a/src/unit_tests/models/algebraic/quadratic_diophantine_equations.rs +++ b/src/unit_tests/models/algebraic/quadratic_diophantine_equations.rs @@ -2,6 +2,7 @@ use crate::models::algebraic::QuadraticDiophantineEquations; use crate::solvers::BruteForce; use crate::traits::Problem; use crate::types::Or; +use num_bigint::BigUint; fn yes_problem() -> QuadraticDiophantineEquations { // a=3, b=5, c=53: x=1 gives y=10, x=4 gives y=1 @@ -13,15 +14,26 @@ fn no_problem() -> QuadraticDiophantineEquations { QuadraticDiophantineEquations::new(3, 5, 10) } +fn bu(n: u32) -> BigUint { + BigUint::from(n) +} + +fn config_for_x(problem: &QuadraticDiophantineEquations, x: u32) -> Vec { + problem.encode_witness(&bu(x)).unwrap() +} + #[test] -fn test_quadratic_diophantine_equations_basic() { +fn test_quadratic_diophantine_equations_creation_and_accessors() { let problem = yes_problem(); - assert_eq!(problem.a(), 3); - assert_eq!(problem.b(), 5); - assert_eq!(problem.c(), 53); - // max_x = isqrt(53/3) = isqrt(17) = 4 
- assert_eq!(problem.dims(), vec![4]); - assert_eq!(problem.num_variables(), 1); + assert_eq!(problem.a(), &bu(3)); + assert_eq!(problem.b(), &bu(5)); + assert_eq!(problem.c(), &bu(53)); + assert_eq!(problem.bit_length_a(), 2); + assert_eq!(problem.bit_length_b(), 3); + assert_eq!(problem.bit_length_c(), 6); + // max_x = floor(sqrt(53 / 3)) = 4, encoded in 3 binary digits. + assert_eq!(problem.dims(), vec![2, 2, 2]); + assert_eq!(problem.num_variables(), 3); assert_eq!( ::NAME, "QuadraticDiophantineEquations" @@ -35,31 +47,43 @@ fn test_quadratic_diophantine_equations_basic() { #[test] fn test_quadratic_diophantine_equations_evaluate_yes() { let problem = yes_problem(); - // config[0]=0 -> x=1: 3*1 + 5y = 53, y=10 - assert_eq!(problem.evaluate(&[0]), Or(true)); - // config[0]=1 -> x=2: 3*4 + 5y = 53, 5y=41, not integer - assert_eq!(problem.evaluate(&[1]), Or(false)); - // config[0]=2 -> x=3: 3*9 + 5y = 53, 5y=26, not integer - assert_eq!(problem.evaluate(&[2]), Or(false)); - // config[0]=3 -> x=4: 3*16 + 5y = 53, 5y=5, y=1 - assert_eq!(problem.evaluate(&[3]), Or(true)); + assert_eq!(problem.evaluate(&config_for_x(&problem, 1)), Or(true)); + assert_eq!(problem.evaluate(&config_for_x(&problem, 2)), Or(false)); + assert_eq!(problem.evaluate(&config_for_x(&problem, 3)), Or(false)); + assert_eq!(problem.evaluate(&config_for_x(&problem, 4)), Or(true)); } #[test] fn test_quadratic_diophantine_equations_evaluate_no() { let problem = no_problem(); - // max_x = isqrt(10/3) = isqrt(3) = 1 - assert_eq!(problem.dims(), vec![1]); - // config[0]=0 -> x=1: 3*1 + 5y = 10, 5y=7, not integer - assert_eq!(problem.evaluate(&[0]), Or(false)); + assert_eq!(problem.dims(), vec![2]); + assert_eq!(problem.evaluate(&config_for_x(&problem, 1)), Or(false)); } #[test] fn test_quadratic_diophantine_equations_evaluate_invalid_config() { let problem = yes_problem(); - // Wrong length assert_eq!(problem.evaluate(&[]), Or(false)); assert_eq!(problem.evaluate(&[0, 1]), Or(false)); + 
assert_eq!(problem.evaluate(&[0, 1, 2]), Or(false)); +} + +#[test] +fn test_quadratic_diophantine_equations_c_le_a() { + let problem = QuadraticDiophantineEquations::new(10, 1, 5); + assert_eq!(problem.dims(), Vec::::new()); + assert_eq!(problem.evaluate(&[]), Or(false)); +} + +#[test] +fn test_quadratic_diophantine_equations_bigint_witness_encoding_round_trip() { + let c = BigUint::from(1u32) << 202usize; + let problem = QuadraticDiophantineEquations::new(1u32, 1u32, c); + let x = (BigUint::from(1u32) << 100usize) + BigUint::from(1u32); + let config = problem.encode_witness(&x).expect("x should be encodable"); + + assert_eq!(config.len(), problem.dims().len()); + assert_eq!(problem.decode_witness(&config), Some(x)); } #[test] @@ -68,6 +92,8 @@ fn test_quadratic_diophantine_equations_solver_finds_witness() { let solver = BruteForce::new(); let witness = solver.find_witness(&problem).unwrap(); assert_eq!(problem.evaluate(&witness), Or(true)); + let x = problem.decode_witness(&witness).unwrap(); + assert!(matches!(x, v if v == bu(1) || v == bu(4))); } #[test] @@ -75,9 +101,13 @@ fn test_quadratic_diophantine_equations_solver_finds_all_witnesses() { let problem = yes_problem(); let solver = BruteForce::new(); let all = solver.find_all_witnesses(&problem); - // Two solutions: x=1 (config[0]=0) and x=4 (config[0]=3) assert_eq!(all.len(), 2); assert!(all.iter().all(|sol| problem.evaluate(sol) == Or(true))); + let decoded = all + .iter() + .map(|sol| problem.decode_witness(sol).unwrap()) + .collect::>(); + assert_eq!(decoded, std::collections::BTreeSet::from([bu(1), bu(4)])); } #[test] @@ -91,14 +121,7 @@ fn test_quadratic_diophantine_equations_solver_no_witness() { fn test_quadratic_diophantine_equations_serialization() { let problem = yes_problem(); let json = serde_json::to_value(&problem).unwrap(); - assert_eq!( - json, - serde_json::json!({ - "a": 3, - "b": 5, - "c": 53, - }) - ); + assert_eq!(json, serde_json::json!({"a": "3", "b": "5", "c": "53"})); let restored: 
QuadraticDiophantineEquations = serde_json::from_value(json).unwrap(); assert_eq!(restored.a(), problem.a()); @@ -108,15 +131,14 @@ fn test_quadratic_diophantine_equations_serialization() { #[test] fn test_quadratic_diophantine_equations_deserialization_rejects_invalid() { - // a=0 let result: Result = serde_json::from_value(serde_json::json!({"a": 0, "b": 5, "c": 53})); assert!(result.is_err()); - // b=0 + let result: Result = serde_json::from_value(serde_json::json!({"a": 3, "b": 0, "c": 53})); assert!(result.is_err()); - // c=0 + let result: Result = serde_json::from_value(serde_json::json!({"a": 3, "b": 5, "c": 0})); assert!(result.is_err()); @@ -125,33 +147,23 @@ fn test_quadratic_diophantine_equations_deserialization_rejects_invalid() { #[test] fn test_quadratic_diophantine_equations_check_x() { let problem = yes_problem(); - assert_eq!(problem.check_x(1), Some(10)); // 3 + 50 = 53 - assert_eq!(problem.check_x(2), None); // 12 + 5y = 53, 41/5 not integer - assert_eq!(problem.check_x(3), None); // 27 + 5y = 53, 26/5 not integer - assert_eq!(problem.check_x(4), Some(1)); // 48 + 5 = 53 - assert_eq!(problem.check_x(5), None); // 75 > 53 - assert_eq!(problem.check_x(0), None); // x must be positive -} - -#[test] -fn test_quadratic_diophantine_equations_edge_case_c_less_than_a() { - // c < a: no valid x since a*1^2 = a > c - let problem = QuadraticDiophantineEquations::new(10, 1, 5); - assert_eq!(problem.dims(), vec![0]); + assert_eq!(problem.check_x(&bu(1)), Some(bu(10))); + assert_eq!(problem.check_x(&bu(2)), None); + assert_eq!(problem.check_x(&bu(3)), None); + assert_eq!(problem.check_x(&bu(4)), Some(bu(1))); + assert_eq!(problem.check_x(&bu(5)), None); + assert_eq!(problem.check_x(&BigUint::default()), None); } #[test] fn test_quadratic_diophantine_equations_paper_example() { - // From issue: a=3, b=5, c=53. x=1: y=10, x=4: y=1. 
let problem = QuadraticDiophantineEquations::new(3, 5, 53); - // Verify the claimed solution x=1 (config[0]=0) - assert_eq!(problem.evaluate(&[0]), Or(true)); - // Verify x=4 (config[0]=3) also works - assert_eq!(problem.evaluate(&[3]), Or(true)); + let config = config_for_x(&problem, 1); + assert_eq!(problem.evaluate(&config), Or(true)); let solver = BruteForce::new(); - let all = solver.find_all_witnesses(&problem); - assert_eq!(all.len(), 2); + let witness = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&witness), Or(true)); } #[test] diff --git a/src/unit_tests/models/misc/maximum_likelihood_ranking.rs b/src/unit_tests/models/misc/maximum_likelihood_ranking.rs index 0d3f3ebe..1868bb4e 100644 --- a/src/unit_tests/models/misc/maximum_likelihood_ranking.rs +++ b/src/unit_tests/models/misc/maximum_likelihood_ranking.rs @@ -13,6 +13,7 @@ fn test_maximum_likelihood_ranking_creation() { let problem = MaximumLikelihoodRanking::new(matrix.clone()); assert_eq!(problem.num_items(), 4); assert_eq!(problem.matrix(), &matrix); + assert_eq!(problem.comparison_count(), 5); assert_eq!(problem.dims(), vec![4; 4]); assert_eq!( ::NAME, @@ -135,6 +136,7 @@ fn test_maximum_likelihood_ranking_two_items() { fn test_maximum_likelihood_ranking_single_item() { let problem = MaximumLikelihoodRanking::new(vec![vec![0]]); assert_eq!(problem.num_items(), 1); + assert_eq!(problem.comparison_count(), 0); assert_eq!(problem.dims(), vec![1]); assert_eq!(problem.evaluate(&[0]), Min(Some(0))); } @@ -151,6 +153,28 @@ fn test_maximum_likelihood_ranking_nonzero_diagonal_panics() { MaximumLikelihoodRanking::new(vec![vec![1, 2], vec![3, 0]]); } +#[test] +fn test_maximum_likelihood_ranking_skew_symmetric() { + // c = 0: skew-symmetric matrix (a_ij = -a_ji) + // Encodes a directed 3-cycle: 0->1, 1->2, 2->0 + let matrix = vec![vec![0, 1, -1], vec![-1, 0, 1], vec![1, -1, 0]]; + let problem = MaximumLikelihoodRanking::new(matrix); + assert_eq!(problem.comparison_count(), 0); + // 
Ranking [0,1,2]: 1 backward arc (2->0, cost +1), 2 forward arcs (cost -1 each) + // Total = 1 + (-1) + (-1) = -1 = 2*FAS - |A| = 2*1 - 3 + assert_eq!(problem.evaluate(&[0, 1, 2]), Min(Some(-1))); + // Optimal FAS = 1, so minimum cost = 2*1 - 3 = -1 + let solver = BruteForce::new(); + let solution = solver.find_witness(&problem).unwrap(); + assert_eq!(problem.evaluate(&solution), Min(Some(-1))); +} + +#[test] +#[should_panic(expected = "all off-diagonal pairs must have the same comparison count")] +fn test_maximum_likelihood_ranking_inconsistent_pair_sum_panics() { + MaximumLikelihoodRanking::new(vec![vec![0, 4, 3], vec![1, 0, 4], vec![1, 2, 0]]); +} + #[cfg(feature = "example-db")] #[test] fn test_maximum_likelihood_ranking_canonical_example() { diff --git a/src/unit_tests/models/misc/minimum_fault_detection_test_set.rs b/src/unit_tests/models/misc/minimum_fault_detection_test_set.rs index 664f7a98..9d123039 100644 --- a/src/unit_tests/models/misc/minimum_fault_detection_test_set.rs +++ b/src/unit_tests/models/misc/minimum_fault_detection_test_set.rs @@ -49,7 +49,7 @@ fn test_minimum_fault_detection_test_set_evaluate_optimal() { // Config [1,0,0,1]: select pairs (0,5) and (1,6) // (0,5) covers {0,2,3,5}, (1,6) covers {1,3,4,6} - // Union = {0,1,2,3,4,5,6} = all 7 vertices -> Min(2) + // Internal vertices are {2,3,4}; both pairs together cover all three. 
assert_eq!(problem.evaluate(&[1, 0, 0, 1]), Min(Some(2))); } @@ -58,11 +58,11 @@ fn test_minimum_fault_detection_test_set_evaluate_insufficient() { let problem = issue_problem(); // Config [1,0,0,0]: select only pair (0,5) - // (0,5) covers {0,2,3,5} -> missing {1,4,6} -> Min(None) + // (0,5) covers internal vertices {2,3} -> missing {4} -> Min(None) assert_eq!(problem.evaluate(&[1, 0, 0, 0]), Min(None)); // Config [0,0,0,1]: select only pair (1,6) - // (1,6) covers {1,3,4,6} -> missing {0,2,5} -> Min(None) + // (1,6) covers internal vertices {3,4} -> missing {2} -> Min(None) assert_eq!(problem.evaluate(&[0, 0, 0, 1]), Min(None)); } @@ -71,7 +71,7 @@ fn test_minimum_fault_detection_test_set_evaluate_all_pairs() { let problem = issue_problem(); // Config [1,1,1,1]: select all 4 pairs - // Union covers all vertices -> Min(4) + // Union covers all internal vertices -> Min(4) assert_eq!(problem.evaluate(&[1, 1, 1, 1]), Min(Some(4))); } @@ -83,6 +83,19 @@ fn test_minimum_fault_detection_test_set_evaluate_no_selection() { assert_eq!(problem.evaluate(&[0, 0, 0, 0]), Min(None)); } +#[test] +fn test_minimum_fault_detection_test_set_counts_only_internal_vertices() { + let problem = MinimumFaultDetectionTestSet::new(2, vec![(0, 1)], vec![0], vec![1]); + + // With only an input and an output, there are no internal vertices to cover. + assert_eq!(problem.evaluate(&[0]), Min(Some(0))); + assert_eq!(problem.evaluate(&[1]), Min(Some(1))); + + let solver = BruteForce::new(); + use crate::solvers::Solver; + assert_eq!(solver.solve(&problem), Min(Some(0))); +} + #[test] fn test_minimum_fault_detection_test_set_wrong_config_length() { let problem = issue_problem(); diff --git a/src/unit_tests/reduction_graph.rs b/src/unit_tests/reduction_graph.rs index 89b0a854..ff32ddb6 100644 --- a/src/unit_tests/reduction_graph.rs +++ b/src/unit_tests/reduction_graph.rs @@ -1,7 +1,10 @@ //! Tests for ReductionGraph: discovery, path finding, and typed API. 
+#[cfg(feature = "ilp-solver")] +use crate::models::algebraic::ILP; use crate::models::decision::Decision; use crate::models::formula::KSatisfiability; +use crate::models::misc::Clustering; use crate::prelude::*; use crate::rules::{MinimizeSteps, ReductionGraph, ReductionMode, TraversalFlow}; use crate::topology::{KingsSubgraph, SimpleGraph, TriangularSubgraph, UnitDiskGraph}; @@ -31,6 +34,21 @@ fn test_reduction_graph_discovers_registered_reductions() { assert!(graph.has_direct_reduction_by_name("Satisfiability", "MaximumIndependentSet")); } +#[test] +fn test_reduction_graph_discovers_k3coloring_to_clustering() { + let graph = ReductionGraph::new(); + + assert!(graph.has_direct_reduction::, Clustering>()); +} + +#[cfg(feature = "ilp-solver")] +#[test] +fn test_reduction_graph_discovers_clustering_to_ilp() { + let graph = ReductionGraph::new(); + + assert!(graph.has_direct_reduction::>()); +} + // ---- Path finding (by name) ---- #[test] @@ -634,7 +652,12 @@ fn find_paths_up_to_stops_after_limit() { // With a limit of 3, should get exactly 3 let limited = graph.find_paths_up_to("MaximumIndependentSet", &src, "QUBO", &dst, 3); - assert_eq!(limited.len(), 3, "should stop after 3 paths"); + assert!( + limited.len() <= 3 && limited.len() < all.len(), + "should stop before enumerating all {} paths, got {}", + all.len(), + limited.len() + ); } #[test] @@ -804,7 +827,7 @@ fn test_find_all_paths_mode_aggregate_rejects_witness_only() { } #[test] -fn test_decision_minimum_vertex_cover_has_direct_aggregate_edge() { +fn test_decision_minimum_vertex_cover_has_both_edges() { let graph = ReductionGraph::new(); assert!(graph.has_direct_reduction_by_name_mode( @@ -812,7 +835,7 @@ fn test_decision_minimum_vertex_cover_has_direct_aggregate_edge() { "MinimumVertexCover", ReductionMode::Aggregate, )); - assert!(!graph.has_direct_reduction_by_name_mode( + assert!(graph.has_direct_reduction_by_name_mode( "DecisionMinimumVertexCover", "MinimumVertexCover", ReductionMode::Witness, @@ 
-820,7 +843,7 @@ fn test_decision_minimum_vertex_cover_has_direct_aggregate_edge() { } #[test] -fn test_decision_minimum_dominating_set_has_direct_aggregate_edge() { +fn test_decision_minimum_dominating_set_has_both_edges() { let graph = ReductionGraph::new(); assert!(graph.has_direct_reduction_by_name_mode( @@ -828,13 +851,49 @@ fn test_decision_minimum_dominating_set_has_direct_aggregate_edge() { "MinimumDominatingSet", ReductionMode::Aggregate, )); - assert!(!graph.has_direct_reduction_by_name_mode( + assert!(graph.has_direct_reduction_by_name_mode( "DecisionMinimumDominatingSet", "MinimumDominatingSet", ReductionMode::Witness, )); } +#[test] +fn test_decision_minimum_dominating_set_to_minmax_multicenter_has_direct_witness_edge() { + let graph = ReductionGraph::new(); + + assert!(graph.has_direct_reduction_mode::< + Decision>, + MinMaxMulticenter, + >(ReductionMode::Witness)); + assert!(!graph.has_direct_reduction_mode::< + Decision>, + MinMaxMulticenter, + >(ReductionMode::Aggregate)); + assert!(!graph.has_direct_reduction_mode::< + Decision>, + MinMaxMulticenter, + >(ReductionMode::Turing)); +} + +#[test] +fn test_decision_minimum_dominating_set_to_minimum_sum_multicenter_has_direct_witness_edge() { + let graph = ReductionGraph::new(); + + assert!(graph.has_direct_reduction_mode::< + Decision>, + MinimumSumMulticenter, + >(ReductionMode::Witness)); + assert!(!graph.has_direct_reduction_mode::< + Decision>, + MinimumSumMulticenter, + >(ReductionMode::Aggregate)); + assert!(!graph.has_direct_reduction_mode::< + Decision>, + MinimumSumMulticenter, + >(ReductionMode::Turing)); +} + #[test] fn test_optimization_to_decision_turing_edges() { let graph = ReductionGraph::new(); @@ -881,3 +940,51 @@ fn test_ksatisfiability_k3_to_decision_minimum_vertex_cover_direct_witness_edge( Decision>, >(ReductionMode::Turing)); } + +#[test] +fn test_find_paths_bounded_limits_depth() { + let graph = ReductionGraph::new(); + let src = 
ReductionGraph::variant_to_map(&MaximumIndependentSet::::variant()); + let dst = ReductionGraph::variant_to_map(&QUBO::::variant()); + + // With tight bound, should find fewer (or no) paths than unbounded + let bounded = graph.find_paths_up_to_mode_bounded( + "MaximumIndependentSet", + &src, + "QUBO", + &dst, + ReductionMode::Witness, + 100, + Some(2), + ); + let unbounded = graph.find_paths_up_to_mode_bounded( + "MaximumIndependentSet", + &src, + "QUBO", + &dst, + ReductionMode::Witness, + 100, + None, + ); + assert!( + bounded.len() <= unbounded.len(), + "bounded ({}) should find <= unbounded ({}) paths", + bounded.len(), + unbounded.len() + ); + + // With bound 0, only direct edges (no intermediates) — MIS→QUBO has no direct edge + let direct_only = graph.find_paths_up_to_mode_bounded( + "MaximumIndependentSet", + &src, + "QUBO", + &dst, + ReductionMode::Witness, + 100, + Some(0), + ); + assert!( + direct_only.is_empty(), + "MIS→QUBO has no direct edge, so bound=0 should return empty" + ); +} diff --git a/src/unit_tests/rules/analysis.rs b/src/unit_tests/rules/analysis.rs index 6f81d5c6..a97f6060 100644 --- a/src/unit_tests/rules/analysis.rs +++ b/src/unit_tests/rules/analysis.rs @@ -266,6 +266,11 @@ fn test_find_dominated_rules_returns_known_set() { "GraphPartitioning {graph: \"SimpleGraph\"}", "QUBO {weight: \"f64\"}", ), + // KSat → DecisionMVC → MVC (via witness edge) dominates direct KSat → MVC + ( + "KSatisfiability {k: \"K3\"}", + "MinimumVertexCover {graph: \"SimpleGraph\", weight: \"i32\"}", + ), ] .into_iter() .collect(); diff --git a/src/unit_tests/rules/bmf_ilp.rs b/src/unit_tests/rules/bmf_ilp.rs index e571d768..a0150808 100644 --- a/src/unit_tests/rules/bmf_ilp.rs +++ b/src/unit_tests/rules/bmf_ilp.rs @@ -1,9 +1,7 @@ use super::*; use crate::models::algebraic::{ObjectiveSense, ILP}; -use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::rules::test_helpers::assert_bf_vs_ilp; use 
crate::rules::{ReduceTo, ReductionResult}; -use crate::solvers::{BruteForce, ILPSolver}; -use crate::traits::Problem; #[test] fn test_bmf_to_ilp_structure() { @@ -18,33 +16,18 @@ fn test_bmf_to_ilp_structure() { #[test] fn test_bmf_to_ilp_closed_loop() { - // 2x2 identity, rank 2 — exact factorization exists + // 2x2 identity, rank 2 — exact factorization exists. + // Use ILP solver on target (fast) + brute force on source (tiny 2x2). let problem = BMF::new(vec![vec![true, false], vec![false, true]], 2); let reduction: ReductionBMFToILP = ReduceTo::>::reduce_to(&problem); - assert_optimization_round_trip_from_optimization_target( - &problem, - &reduction, - "BMF->ILP closed loop", - ); + assert_bf_vs_ilp(&problem, &reduction); } #[test] fn test_bmf_to_ilp_bf_vs_ilp() { let problem = BMF::new(vec![vec![true, true], vec![true, false]], 1); let reduction: ReductionBMFToILP = ReduceTo::>::reduce_to(&problem); - - let bf = BruteForce::new(); - let bf_witness = bf.find_witness(&problem).expect("should have a witness"); - let bf_value = problem.evaluate(&bf_witness); - - let ilp_solver = ILPSolver::new(); - let ilp_solution = ilp_solver - .solve(reduction.target_problem()) - .expect("ILP should be solvable"); - let extracted = reduction.extract_solution(&ilp_solution); - let ilp_value = problem.evaluate(&extracted); - - assert_eq!(bf_value, ilp_value); + assert_bf_vs_ilp(&problem, &reduction); } #[test] diff --git a/src/unit_tests/rules/clustering_ilp.rs b/src/unit_tests/rules/clustering_ilp.rs new file mode 100644 index 00000000..a35273e3 --- /dev/null +++ b/src/unit_tests/rules/clustering_ilp.rs @@ -0,0 +1,78 @@ +use super::*; +use crate::models::algebraic::{Comparison, ObjectiveSense, ILP}; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::ILPSolver; +use crate::traits::Problem; +use crate::types::Or; + +fn canonical_yes_instance() -> Clustering { + Clustering::new( + vec![ + vec![0, 1, 3, 3], + vec![1, 0, 3, 
3], + vec![3, 3, 0, 1], + vec![3, 3, 1, 0], + ], + 2, + 1, + ) +} + +fn infeasible_instance() -> Clustering { + Clustering::new(vec![vec![0, 3, 1], vec![3, 0, 1], vec![1, 1, 0]], 1, 1) +} + +#[test] +fn test_clustering_to_ilp_structure() { + let problem = canonical_yes_instance(); + let reduction: ReductionClusteringToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 8); + assert_eq!(ilp.constraints.len(), 12); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + assert!(ilp.objective.is_empty()); + + let assignment_constraints = ilp + .constraints + .iter() + .filter(|constraint| constraint.cmp == Comparison::Eq && constraint.rhs == 1.0) + .count(); + let conflict_constraints = ilp + .constraints + .iter() + .filter(|constraint| constraint.cmp == Comparison::Le && constraint.rhs == 1.0) + .count(); + assert_eq!(assignment_constraints, 4); + assert_eq!(conflict_constraints, 8); +} + +#[test] +fn test_clustering_to_ilp_closed_loop() { + let problem = canonical_yes_instance(); + let reduction: ReductionClusteringToILP = ReduceTo::>::reduce_to(&problem); + + assert_satisfaction_round_trip_from_optimization_target( + &problem, + &reduction, + "Clustering->ILP closed loop", + ); +} + +#[test] +fn test_clustering_to_ilp_solution_extraction() { + let problem = canonical_yes_instance(); + let reduction: ReductionClusteringToILP = ReduceTo::>::reduce_to(&problem); + + let extracted = reduction.extract_solution(&[1, 0, 1, 0, 0, 1, 0, 1]); + assert_eq!(extracted, vec![0, 0, 1, 1]); + assert_eq!(problem.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_clustering_to_ilp_infeasible_instance_is_infeasible() { + let problem = infeasible_instance(); + let reduction: ReductionClusteringToILP = ReduceTo::>::reduce_to(&problem); + + assert!(ILPSolver::new().solve(reduction.target_problem()).is_none()); +} diff --git a/src/unit_tests/rules/decisionminimumdominatingset_minimumsummulticenter.rs 
b/src/unit_tests/rules/decisionminimumdominatingset_minimumsummulticenter.rs new file mode 100644 index 00000000..c6418de9 --- /dev/null +++ b/src/unit_tests/rules/decisionminimumdominatingset_minimumsummulticenter.rs @@ -0,0 +1,93 @@ +use crate::models::decision::Decision; +use crate::models::graph::{MinimumDominatingSet, MinimumSumMulticenter}; +use crate::rules::{ReduceTo, ReductionResult}; +use crate::solvers::BruteForce; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::{One, Or}; + +fn decision_mds( + num_vertices: usize, + edges: &[(usize, usize)], + k: i32, +) -> Decision> { + Decision::new( + MinimumDominatingSet::new( + SimpleGraph::new(num_vertices, edges.to_vec()), + vec![One; num_vertices], + ), + k, + ) +} + +#[test] +fn test_decisionminimumdominatingset_to_minimumsummulticenter_structure() { + let source = decision_mds( + 6, + &[(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (3, 5), (4, 5)], + 2, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!( + target.graph().num_vertices(), + source.inner().graph().num_vertices() + ); + assert_eq!(target.graph().edges(), source.inner().graph().edges()); + assert_eq!(target.vertex_weights(), vec![1i32; 6].as_slice()); + assert_eq!(target.edge_lengths(), vec![1i32; 7].as_slice()); + assert_eq!(target.k(), 2); +} + +#[test] +fn test_decisionminimumdominatingset_to_minimumsummulticenter_closed_loop_yes_instance() { + let source = decision_mds( + 6, + &[(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (3, 5), (4, 5)], + 2, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let target_solutions = BruteForce::new().find_all_witnesses(target); + assert!( + !target_solutions.is_empty(), + "target should have optimal K-center placements" + ); + + for target_solution in target_solutions { + assert_eq!(target.evaluate(&target_solution).unwrap(), 4); + let extracted = 
reduction.extract_solution(&target_solution); + assert_eq!(extracted, target_solution); + assert_eq!(source.evaluate(&extracted), Or(true)); + } +} + +#[test] +fn test_decisionminimumdominatingset_to_minimumsummulticenter_closed_loop_no_instance() { + let source = decision_mds( + 6, + &[(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (3, 5), (4, 5)], + 1, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let target_solutions = BruteForce::new().find_all_witnesses(target); + assert!( + !target_solutions.is_empty(), + "target should still have optimal K-center placements" + ); + + let threshold = source.inner().graph().num_vertices() as i32 - source.k() as i32; + for target_solution in target_solutions { + let target_value = target.evaluate(&target_solution).unwrap(); + assert_eq!(target_value, 6); + assert!(target_value > threshold); + + let extracted = reduction.extract_solution(&target_solution); + assert_eq!(extracted, target_solution); + assert_eq!(source.evaluate(&extracted), Or(false)); + } +} diff --git a/src/unit_tests/rules/decisionminimumdominatingset_minmaxmulticenter.rs b/src/unit_tests/rules/decisionminimumdominatingset_minmaxmulticenter.rs new file mode 100644 index 00000000..f87bf9e6 --- /dev/null +++ b/src/unit_tests/rules/decisionminimumdominatingset_minmaxmulticenter.rs @@ -0,0 +1,75 @@ +use super::*; +use crate::models::decision::Decision; +use crate::models::graph::{MinMaxMulticenter, MinimumDominatingSet}; +use crate::rules::ReduceTo; +use crate::solvers::BruteForce; +use crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; +use crate::types::{One, Or}; + +fn decision_mds( + num_vertices: usize, + edges: &[(usize, usize)], + k: i32, +) -> Decision> { + Decision::new( + MinimumDominatingSet::new( + SimpleGraph::new(num_vertices, edges.to_vec()), + vec![One; num_vertices], + ), + k, + ) +} + +#[test] +fn test_decisionminimumdominatingset_to_minmaxmulticenter_structure() { + let source = 
decision_mds( + 6, + &[(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (3, 5), (4, 5)], + 2, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!( + target.graph().num_vertices(), + source.inner().graph().num_vertices() + ); + assert_eq!(target.graph().edges(), source.inner().graph().edges()); + assert_eq!(target.vertex_weights(), vec![One; 6].as_slice()); + assert_eq!(target.edge_lengths(), vec![One; 7].as_slice()); + assert_eq!(target.k(), 2); +} + +#[test] +fn test_decisionminimumdominatingset_to_minmaxmulticenter_closed_loop() { + let source = decision_mds( + 6, + &[(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (3, 5), (4, 5)], + 2, + ); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let target_solutions = BruteForce::new().find_all_witnesses(target); + assert!( + !target_solutions.is_empty(), + "target should have feasible K-center placements" + ); + + for target_solution in target_solutions { + let extracted = reduction.extract_solution(&target_solution); + assert_eq!(extracted, target_solution); + assert_eq!(source.evaluate(&extracted), Or(true)); + } +} + +#[test] +fn test_decisionminimumdominatingset_to_minmaxmulticenter_no_witness_when_bound_too_small() { + let source = decision_mds(4, &[(0, 1), (2, 3)], 1); + let reduction = ReduceTo::>::reduce_to(&source); + + assert!(BruteForce::new() + .find_witness(reduction.target_problem()) + .is_none()); +} diff --git a/src/unit_tests/rules/decisionminimumvertexcover_hamiltoniancircuit.rs b/src/unit_tests/rules/decisionminimumvertexcover_hamiltoniancircuit.rs new file mode 100644 index 00000000..db7bf2c6 --- /dev/null +++ b/src/unit_tests/rules/decisionminimumvertexcover_hamiltoniancircuit.rs @@ -0,0 +1,96 @@ +use super::*; +use crate::models::decision::Decision; +use crate::models::graph::{HamiltonianCircuit, MinimumVertexCover}; +use crate::rules::ReduceTo; +use crate::solvers::BruteForce; +use 
crate::topology::{Graph, SimpleGraph}; +use crate::traits::Problem; + +fn decision_mvc( + num_vertices: usize, + edges: &[(usize, usize)], + weights: &[i32], + k: i32, +) -> Decision> { + Decision::new( + MinimumVertexCover::new( + SimpleGraph::new(num_vertices, edges.to_vec()), + weights.to_vec(), + ), + k, + ) +} + +#[test] +fn test_decisionminimumvertexcover_to_hamiltoniancircuit_structure_counts() { + let source = decision_mvc(3, &[(0, 1), (1, 2)], &[1, 1, 1], 1); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_vertices(), 25); + assert_eq!(target.num_edges(), 35); + assert_eq!(target.graph().neighbors(0).len(), 6); +} + +#[test] +fn test_decisionminimumvertexcover_to_hamiltoniancircuit_closed_loop() { + let source = decision_mvc(3, &[(0, 1), (1, 2)], &[1, 1, 1], 1); + let reduction = ReduceTo::>::reduce_to(&source); + + let cover = vec![0, 1, 0]; + let target_witness = reduction.build_target_witness(&cover); + + assert!(reduction.target_problem().evaluate(&target_witness).0); + + let extracted = reduction.extract_solution(&target_witness); + assert_eq!(extracted, cover); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_decisionminimumvertexcover_to_hamiltoniancircuit_ignores_isolated_vertices() { + let source = decision_mvc(3, &[(0, 1)], &[1, 1, 1], 1); + let reduction = ReduceTo::>::reduce_to(&source); + + let target_witness = reduction.build_target_witness(&[1, 0, 0]); + assert!(reduction.target_problem().evaluate(&target_witness).0); + + let extracted = reduction.extract_solution(&target_witness); + assert_eq!(extracted.len(), 3); + assert_eq!(extracted[2], 0); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_decisionminimumvertexcover_to_hamiltoniancircuit_fixed_yes_when_k_covers_all_active_vertices( +) { + let source = decision_mvc(3, &[(0, 1), (1, 2)], &[1, 1, 1], 3); + let reduction = ReduceTo::>::reduce_to(&source); + let target = 
reduction.target_problem(); + + assert_eq!(target.num_vertices(), 3); + assert_eq!(target.num_edges(), 3); + + let witness = BruteForce::new() + .find_witness(target) + .expect("triangle should have a Hamiltonian circuit"); + let extracted = reduction.extract_solution(&witness); + assert!(source.evaluate(&extracted).0); +} + +#[test] +fn test_decisionminimumvertexcover_to_hamiltoniancircuit_fixed_no_when_k_zero() { + let source = decision_mvc(2, &[(0, 1)], &[1, 1], 0); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_vertices(), 3); + assert!(BruteForce::new().find_witness(target).is_none()); +} + +#[test] +#[should_panic(expected = "unit vertex weights")] +fn test_decisionminimumvertexcover_to_hamiltoniancircuit_rejects_non_unit_weights() { + let source = decision_mvc(2, &[(0, 1)], &[2, 1], 1); + let _ = ReduceTo::>::reduce_to(&source); +} diff --git a/src/unit_tests/rules/exactcoverby3sets_minimumaxiomset.rs b/src/unit_tests/rules/exactcoverby3sets_minimumaxiomset.rs new file mode 100644 index 00000000..074f6ddc --- /dev/null +++ b/src/unit_tests/rules/exactcoverby3sets_minimumaxiomset.rs @@ -0,0 +1,79 @@ +use super::*; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +fn issue_yes_instance() -> ExactCoverBy3Sets { + ExactCoverBy3Sets::new( + 6, + vec![[0, 1, 2], [0, 3, 4], [2, 4, 5], [1, 3, 5], [0, 2, 4]], + ) +} + +fn shared_zero_instance() -> ExactCoverBy3Sets { + ExactCoverBy3Sets::new(6, vec![[0, 1, 2], [0, 3, 4], [0, 4, 5]]) +} + +#[test] +fn test_exactcoverby3sets_to_minimumaxiomset_closed_loop() { + let source = issue_yes_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_satisfaction_round_trip_from_optimization_target( + &source, + &reduction, + "ExactCoverBy3Sets -> MinimumAxiomSet closed 
loop", + ); + + let optimal = BruteForce::new() + .find_witness(target) + .expect("expected an optimal target witness"); + assert_eq!(target.evaluate(&optimal), Min(Some(2))); +} + +#[test] +fn test_exactcoverby3sets_to_minimumaxiomset_structure() { + let source = issue_yes_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_sentences(), 11); + assert_eq!(target.num_true_sentences(), 11); + assert_eq!(target.num_implications(), 20); + assert_eq!(target.true_sentences(), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + + assert_eq!(target.implications()[0], (vec![6], 0)); + assert_eq!(target.implications()[1], (vec![6], 1)); + assert_eq!(target.implications()[2], (vec![6], 2)); + assert_eq!(target.implications()[3], (vec![0, 1, 2], 6)); + assert_eq!(target.implications()[16], (vec![10], 0)); + assert_eq!(target.implications()[17], (vec![10], 2)); + assert_eq!(target.implications()[18], (vec![10], 4)); + assert_eq!(target.implications()[19], (vec![0, 2, 4], 10)); +} + +#[test] +fn test_exactcoverby3sets_to_minimumaxiomset_no_instance_gap() { + let source = shared_zero_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + let optimal = BruteForce::new() + .find_witness(target) + .expect("expected an optimal target witness"); + assert_eq!(target.evaluate(&optimal), Min(Some(3))); + + let extracted = reduction.extract_solution(&optimal); + assert!(!source.evaluate(&extracted)); +} + +#[test] +fn test_extract_solution_reads_only_set_sentence_axioms() { + let source = issue_yes_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + let extracted = reduction.extract_solution(&[1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1]); + assert_eq!(extracted, vec![0, 0, 0, 1, 1]); +} diff --git a/src/unit_tests/rules/exactcoverby3sets_minimumfaultdetectiontestset.rs b/src/unit_tests/rules/exactcoverby3sets_minimumfaultdetectiontestset.rs new file mode 100644 index 
00000000..a974fbe9 --- /dev/null +++ b/src/unit_tests/rules/exactcoverby3sets_minimumfaultdetectiontestset.rs @@ -0,0 +1,90 @@ +use super::*; +use crate::models::misc::MinimumFaultDetectionTestSet; +use crate::models::set::ExactCoverBy3Sets; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Min; + +fn issue_yes_instance() -> ExactCoverBy3Sets { + ExactCoverBy3Sets::new(6, vec![[0, 1, 2], [3, 4, 5], [0, 3, 4]]) +} + +fn no_cover_instance() -> ExactCoverBy3Sets { + ExactCoverBy3Sets::new(6, vec![[0, 1, 2], [0, 3, 4], [0, 4, 5]]) +} + +#[test] +fn test_exactcoverby3sets_to_minimumfaultdetectiontestset_closed_loop() { + let source = issue_yes_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_satisfaction_round_trip_from_optimization_target( + &source, + &reduction, + "ExactCoverBy3Sets -> MinimumFaultDetectionTestSet closed loop", + ); + + let best = BruteForce::new() + .find_witness(target) + .expect("expected an optimal target witness"); + assert_eq!(target.evaluate(&best), Min(Some(2))); +} + +#[test] +fn test_exactcoverby3sets_to_minimumfaultdetectiontestset_structure() { + let source = issue_yes_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_vertices(), 10); + assert_eq!(target.num_arcs(), 15); + assert_eq!(target.inputs(), &[0, 1, 2]); + assert_eq!(target.outputs(), &[9]); + + assert_eq!( + target.arcs(), + &[ + (0, 3), + (0, 4), + (0, 5), + (1, 6), + (1, 7), + (1, 8), + (2, 3), + (2, 6), + (2, 7), + (3, 9), + (4, 9), + (5, 9), + (6, 9), + (7, 9), + (8, 9), + ] + ); +} + +#[test] +fn test_exactcoverby3sets_to_minimumfaultdetectiontestset_no_instance_gap() { + let source = no_cover_instance(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + let best 
= BruteForce::new() + .find_witness(target) + .expect("expected an optimal target witness"); + assert_eq!(target.evaluate(&best), Min(Some(3))); + + let extracted = reduction.extract_solution(&best); + assert!(!source.evaluate(&extracted)); +} + +#[test] +fn test_exactcoverby3sets_to_minimumfaultdetectiontestset_extract_solution_identity() { + let source = issue_yes_instance(); + let reduction = ReduceTo::::reduce_to(&source); + + assert_eq!(reduction.extract_solution(&[1, 1, 0]), vec![1, 1, 0]); + assert!(source.evaluate(&[1, 1, 0]).0); +} diff --git a/src/unit_tests/rules/kcoloring_clustering.rs b/src/unit_tests/rules/kcoloring_clustering.rs new file mode 100644 index 00000000..3c003c41 --- /dev/null +++ b/src/unit_tests/rules/kcoloring_clustering.rs @@ -0,0 +1,69 @@ +use super::*; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_satisfaction_target; +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::variant::K3; + +#[test] +fn test_kcoloring_to_clustering_closed_loop() { + let source = KColoring::::new(SimpleGraph::cycle(5)); + let reduction = ReduceTo::::reduce_to(&source); + + assert_satisfaction_round_trip_from_satisfaction_target( + &source, + &reduction, + "KColoring->Clustering closed loop", + ); +} + +#[test] +fn test_kcoloring_to_clustering_distance_matrix() { + let source = KColoring::::new(SimpleGraph::new(4, vec![(0, 1), (1, 2), (2, 3)])); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_elements(), 4); + assert_eq!(target.num_clusters(), 3); + assert_eq!(target.diameter_bound(), 0); + assert_eq!( + target.distances(), + &[ + vec![0, 1, 0, 0], + vec![1, 0, 1, 0], + vec![0, 1, 0, 1], + vec![0, 0, 1, 0], + ] + ); +} + +#[test] +fn test_kcoloring_to_clustering_extract_solution_identity() { + let source = KColoring::::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)])); + let reduction = ReduceTo::::reduce_to(&source); + let config = vec![0, 
1, 0]; + + assert_eq!(reduction.extract_solution(&config), config); +} + +#[test] +fn test_kcoloring_to_clustering_unsat_preserved() { + let source = KColoring::::new(SimpleGraph::complete(4)); + let reduction = ReduceTo::::reduce_to(&source); + let solver = BruteForce::new(); + + assert!(solver.find_witness(&source).is_none()); + assert!(solver.find_witness(reduction.target_problem()).is_none()); +} + +#[test] +fn test_kcoloring_to_clustering_empty_graph() { + let source = KColoring::::new(SimpleGraph::new(0, vec![])); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_elements(), 1); + assert_eq!(target.num_clusters(), 3); + assert_eq!(target.diameter_bound(), 0); + assert_eq!(reduction.extract_solution(&[2]), Vec::::new()); + assert_satisfaction_round_trip_from_satisfaction_target(&source, &reduction, "empty graph"); +} diff --git a/src/unit_tests/rules/ksatisfiability_feasibleregisterassignment.rs b/src/unit_tests/rules/ksatisfiability_feasibleregisterassignment.rs index c9c10fb7..457ef61d 100644 --- a/src/unit_tests/rules/ksatisfiability_feasibleregisterassignment.rs +++ b/src/unit_tests/rules/ksatisfiability_feasibleregisterassignment.rs @@ -26,11 +26,26 @@ fn test_ksatisfiability_to_feasible_register_assignment_structure() { assert_eq!(target.num_arcs(), 30); assert_eq!(target.num_registers(), 21); - assert_eq!(target.assignment()[s_pos_idx(0)], target.assignment()[s_neg_idx(3, 0)]); - assert_eq!(target.assignment()[s_pos_idx(1)], target.assignment()[s_neg_idx(3, 1)]); - assert_eq!(target.assignment()[s_pos_idx(2)], target.assignment()[s_neg_idx(3, 2)]); - assert_eq!(target.assignment()[r_idx(3, 0, 0)], target.assignment()[rbar_idx(3, 0, 0)]); - assert_eq!(target.assignment()[r_idx(3, 1, 2)], target.assignment()[rbar_idx(3, 1, 2)]); + assert_eq!( + target.assignment()[s_pos_idx(0)], + target.assignment()[s_neg_idx(3, 0)] + ); + assert_eq!( + target.assignment()[s_pos_idx(1)], + 
target.assignment()[s_neg_idx(3, 1)] + ); + assert_eq!( + target.assignment()[s_pos_idx(2)], + target.assignment()[s_neg_idx(3, 2)] + ); + assert_eq!( + target.assignment()[r_idx(3, 0, 0)], + target.assignment()[rbar_idx(3, 0, 0)] + ); + assert_eq!( + target.assignment()[r_idx(3, 1, 2)], + target.assignment()[rbar_idx(3, 1, 2)] + ); let arc_set: BTreeSet<_> = target.arcs().iter().copied().collect(); assert!(arc_set.contains(&(q_idx(3, 0, 0), p_idx(3, 0, 0)))); @@ -87,7 +102,9 @@ fn test_ksatisfiability_to_feasible_register_assignment_unsatisfiable_instance() let fra_to_ilp = ReduceTo::>::reduce_to(reduction.target_problem()); assert!( - ILPSolver::new().solve(fra_to_ilp.target_problem()).is_none(), + ILPSolver::new() + .solve(fra_to_ilp.target_problem()) + .is_none(), "an unsatisfiable source formula should yield an infeasible FRA instance" ); } diff --git a/src/unit_tests/rules/ksatisfiability_quadraticdiophantineequations.rs b/src/unit_tests/rules/ksatisfiability_quadraticdiophantineequations.rs new file mode 100644 index 00000000..70045195 --- /dev/null +++ b/src/unit_tests/rules/ksatisfiability_quadraticdiophantineequations.rs @@ -0,0 +1,47 @@ +use super::*; +use crate::models::algebraic::QuadraticDiophantineEquations; +use crate::models::formula::CNFClause; +use crate::solvers::BruteForce; +use crate::traits::Problem; +use crate::types::Or; +use crate::variant::K3; + +fn trivial_source() -> KSatisfiability { + KSatisfiability::::new(2, vec![CNFClause::new(vec![1, -1, 2])]) +} + +#[test] +fn test_ksatisfiability_to_quadraticdiophantineequations_closed_loop() { + let source = trivial_source(); + let reduction = ReduceTo::::reduce_to(&source); + + let solver = BruteForce::new(); + let target_solution = solver + .find_witness(reduction.target_problem()) + .expect("target should be satisfiable"); + + assert_eq!( + reduction.target_problem().evaluate(&target_solution), + Or(true) + ); + + let extracted = reduction.extract_solution(&target_solution); + 
assert_eq!(source.evaluate(&extracted), Or(true)); +} + +#[test] +fn test_ksatisfiability_to_quadraticdiophantineequations_yes_vector_matches_reference() { + let source = canonical_source(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + let target_config = target + .encode_witness(&canonical_witness()) + .expect("reference witness must fit target encoding"); + + assert_eq!(target.evaluate(&target_config), Or(true)); + + let extracted = reduction.extract_solution(&target_config); + assert_eq!(extracted, vec![1, 0, 0]); + assert_eq!(source.evaluate(&extracted), Or(true)); +} diff --git a/src/unit_tests/rules/ksatisfiability_registersufficiency.rs b/src/unit_tests/rules/ksatisfiability_registersufficiency.rs new file mode 100644 index 00000000..fdf330cf --- /dev/null +++ b/src/unit_tests/rules/ksatisfiability_registersufficiency.rs @@ -0,0 +1,141 @@ +use super::*; +use crate::models::formula::CNFClause; +use crate::models::misc::RegisterSufficiency; +use crate::traits::Problem; +use crate::types::Or; +use crate::variant::K3; +use std::collections::BTreeSet; + +fn issue_example() -> KSatisfiability { + KSatisfiability::::new( + 3, + vec![ + CNFClause::new(vec![1, -2, 3]), + CNFClause::new(vec![-1, 2, -3]), + ], + ) +} + +fn repeated_positive_literal() -> KSatisfiability { + KSatisfiability::::new(1, vec![CNFClause::new(vec![1, 1, 1])]) +} + +fn contradictory_single_variable() -> KSatisfiability { + KSatisfiability::::new( + 1, + vec![ + CNFClause::new(vec![1, 1, 1]), + CNFClause::new(vec![-1, -1, -1]), + ], + ) +} + +fn positions_from_order(order: &[usize], total_vertices: usize) -> Vec { + assert_eq!(order.len(), total_vertices); + let mut positions = vec![usize::MAX; total_vertices]; + for (position, &vertex) in order.iter().enumerate() { + positions[vertex] = position; + } + assert!(positions.iter().all(|&position| position != usize::MAX)); + positions +} + +#[test] +fn 
test_ksatisfiability_to_register_sufficiency_structure_issue_example() { + let source = issue_example(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + let layout = SethiRegisterLayout::new(source.num_vars(), source.num_clauses()); + + assert_eq!(target.num_vertices(), 70); + assert_eq!(target.num_arcs(), 152); + assert_eq!(target.bound(), 23); + + let arc_set: BTreeSet<_> = target.arcs().iter().copied().collect(); + assert!(arc_set.contains(&(layout.initial(), layout.a(0)))); + assert!(arc_set.contains(&(layout.initial(), layout.bnode(3)))); + assert!(arc_set.contains(&(layout.c(0), layout.w(2)))); + assert!(arc_set.contains(&(layout.c(0), layout.z(2)))); + assert!(arc_set.contains(&(layout.x_pos(0), layout.f(0, 0)))); + assert!(arc_set.contains(&(layout.x_neg(1), layout.f(0, 1)))); + assert!(arc_set.contains(&(layout.x_pos(2), layout.f(0, 2)))); + assert!(arc_set.contains(&(layout.x_neg(0), layout.f(0, 1)))); + assert!(arc_set.contains(&(layout.x_neg(0), layout.f(0, 2)))); + assert!(arc_set.contains(&(layout.x_pos(1), layout.f(0, 2)))); +} + +#[test] +fn test_ksatisfiability_to_register_sufficiency_extract_solution_uses_w_snapshot_and_x_pos_sign() { + let source = repeated_positive_literal(); + let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + let layout = SethiRegisterLayout::new(source.num_vars(), source.num_clauses()); + + let mut order = Vec::with_capacity(target.num_vertices()); + order.push(layout.z(0)); + order.push(layout.x_pos(0)); + order.push(layout.w(0)); + order.push(layout.x_neg(0)); + for vertex in 0..target.num_vertices() { + if !matches!( + vertex, + v if v == layout.z(0) + || v == layout.x_pos(0) + || v == layout.w(0) + || v == layout.x_neg(0) + ) { + order.push(vertex); + } + } + + let extracted = + reduction.extract_solution(&positions_from_order(&order, target.num_vertices())); + assert_eq!(extracted, vec![1]); +} + +#[test] +fn 
test_ksatisfiability_to_register_sufficiency_closed_loop_via_exact_solver() { + let source = repeated_positive_literal(); + let reduction = ReduceTo::::reduce_to(&source); + + let register_schedule = reduction + .target_problem() + .solve_exact() + .expect("satisfiable source formula should yield a feasible register schedule"); + assert_eq!( + reduction.target_problem().evaluate(®ister_schedule), + Or(true) + ); + + let extracted = reduction.extract_solution(®ister_schedule); + assert_eq!(source.evaluate(&extracted), Or(true)); + assert_eq!(extracted, vec![1]); +} + +#[test] +fn test_ksatisfiability_to_register_sufficiency_unsatisfiable_instance() { + use crate::solvers::{BruteForce, Solver}; + use crate::types::Or; + + let source = contradictory_single_variable(); + // Verify the source is indeed unsatisfiable via brute force + assert_eq!(BruteForce::new().solve(&source), Or(false)); + + // Verify the reduction produces a valid RS instance — we check that + // the structure is correct (vertex/arc counts match Sethi layout) rather + // than solving the 70-vertex RS instance, which would be too slow. 
+ let reduction = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + let layout = SethiRegisterLayout::new(source.num_vars(), source.num_clauses()); + assert_eq!(target.num_vertices(), layout.total_vertices()); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_ksatisfiability_to_register_sufficiency_canonical_example_spec() { + let spec = canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "ksatisfiability_to_registersufficiency") + .expect("missing canonical KSatisfiability -> RegisterSufficiency example spec"); + assert_eq!(spec.id, "ksatisfiability_to_registersufficiency"); +} diff --git a/src/unit_tests/rules/maximum2satisfiability_maxcut.rs b/src/unit_tests/rules/maximum2satisfiability_maxcut.rs index 7f7acee3..16863af5 100644 --- a/src/unit_tests/rules/maximum2satisfiability_maxcut.rs +++ b/src/unit_tests/rules/maximum2satisfiability_maxcut.rs @@ -52,6 +52,30 @@ fn test_maximum2satisfiability_to_maxcut_structure() { assert_eq!(target.evaluate(&target_solution), Max(Some(2))); } +#[test] +fn test_maximum2satisfiability_to_maxcut_issue_affine_relation_on_all_partitions() { + let source = make_issue_instance(); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + // For this issue instance, every partition satisfies + // 2 * satisfied_clauses = 8 + cut_weight. 
+ for mask in 0..(1usize << target.num_vertices()) { + let target_solution: Vec = (0..target.num_vertices()) + .map(|bit| (mask >> bit) & 1) + .collect(); + let source_solution = reduction.extract_solution(&target_solution); + let satisfied = source.evaluate(&source_solution).unwrap() as i32; + let cut_weight = target.evaluate(&target_solution).unwrap(); + + assert_eq!( + 2 * satisfied, + 8 + cut_weight, + "target config {target_solution:?}" + ); + } +} + #[test] fn test_maximum2satisfiability_to_maxcut_extract_solution_uses_reference_vertex() { let source = make_issue_instance(); diff --git a/src/unit_tests/rules/minimumfaultdetectiontestset_ilp.rs b/src/unit_tests/rules/minimumfaultdetectiontestset_ilp.rs new file mode 100644 index 00000000..a831575c --- /dev/null +++ b/src/unit_tests/rules/minimumfaultdetectiontestset_ilp.rs @@ -0,0 +1,102 @@ +use super::*; +use crate::models::algebraic::{Comparison, ObjectiveSense}; +use crate::models::misc::MinimumFaultDetectionTestSet; +use crate::rules::test_helpers::assert_bf_vs_ilp; +use crate::solvers::ILPSolver; +use crate::traits::Problem; +use crate::types::Min; + +fn issue_problem() -> MinimumFaultDetectionTestSet { + MinimumFaultDetectionTestSet::new( + 7, + vec![ + (0, 2), + (0, 3), + (1, 3), + (1, 4), + (2, 5), + (3, 5), + (3, 6), + (4, 6), + ], + vec![0, 1], + vec![5, 6], + ) +} + +#[test] +fn test_reduction_creates_covering_ilp() { + let problem = issue_problem(); + let reduction: ReductionMFDTSToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 4); + assert_eq!(ilp.constraints.len(), 3); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); + assert_eq!(ilp.objective, vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)]); + + assert_eq!(ilp.constraints[0].cmp, Comparison::Ge); + assert_eq!(ilp.constraints[0].rhs, 1.0); + assert_eq!(ilp.constraints[0].terms, vec![(0, 1.0)]); + + assert_eq!(ilp.constraints[1].cmp, Comparison::Ge); + assert_eq!(ilp.constraints[1].rhs, 
1.0); + assert_eq!( + ilp.constraints[1].terms, + vec![(0, 1.0), (1, 1.0), (2, 1.0), (3, 1.0)] + ); + + assert_eq!(ilp.constraints[2].cmp, Comparison::Ge); + assert_eq!(ilp.constraints[2].rhs, 1.0); + assert_eq!(ilp.constraints[2].terms, vec![(3, 1.0)]); +} + +#[test] +fn test_minimumfaultdetectiontestset_to_ilp_closed_loop() { + let problem = issue_problem(); + let reduction: ReductionMFDTSToILP = ReduceTo::>::reduce_to(&problem); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("ILP should be solvable"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(extracted, vec![1, 0, 0, 1]); + assert_eq!(problem.evaluate(&extracted), Min(Some(2))); + assert_bf_vs_ilp(&problem, &reduction); +} + +#[test] +fn test_reduction_is_infeasible_when_an_internal_vertex_has_no_covering_pair() { + let problem = MinimumFaultDetectionTestSet::new(3, vec![], vec![0], vec![2]); + let reduction: ReductionMFDTSToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 1); + assert_eq!(ilp.constraints.len(), 1); + assert!(ilp.constraints[0].terms.is_empty()); + assert_eq!(ilp.constraints[0].cmp, Comparison::Ge); + assert_eq!(ilp.constraints[0].rhs, 1.0); + + assert_eq!(problem.evaluate(&[0]), Min(None)); + assert_eq!(problem.evaluate(&[1]), Min(None)); + assert!(ILPSolver::new().solve(ilp).is_none()); +} + +#[test] +fn test_reduction_handles_instances_without_internal_vertices() { + let problem = MinimumFaultDetectionTestSet::new(2, vec![(0, 1)], vec![0], vec![1]); + let reduction: ReductionMFDTSToILP = ReduceTo::>::reduce_to(&problem); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 1); + assert!(ilp.constraints.is_empty()); + + let ilp_solution = ILPSolver::new() + .solve(ilp) + .expect("ILP should be feasible when there are no internal vertices"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(extracted, vec![0]); + 
assert_eq!(problem.evaluate(&extracted), Min(Some(0))); +} diff --git a/src/unit_tests/rules/minimumfeedbackarcset_maximumlikelihoodranking.rs b/src/unit_tests/rules/minimumfeedbackarcset_maximumlikelihoodranking.rs new file mode 100644 index 00000000..c021a9dd --- /dev/null +++ b/src/unit_tests/rules/minimumfeedbackarcset_maximumlikelihoodranking.rs @@ -0,0 +1,161 @@ +#[cfg(feature = "example-db")] +use super::canonical_rule_example_specs; +use super::ReductionFASToMLR; +use crate::models::graph::MinimumFeedbackArcSet; +use crate::models::misc::MaximumLikelihoodRanking; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::rules::traits::ReductionResult; +use crate::rules::ReduceTo; +#[cfg(feature = "example-db")] +use crate::solvers::BruteForce; +use crate::topology::DirectedGraph; +#[cfg(feature = "example-db")] +use crate::traits::Problem; + +fn issue_example_source() -> MinimumFeedbackArcSet { + MinimumFeedbackArcSet::new( + DirectedGraph::new( + 5, + vec![(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 2), (0, 4)], + ), + vec![1i32; 7], + ) +} + +fn dag_source() -> MinimumFeedbackArcSet { + MinimumFeedbackArcSet::new( + DirectedGraph::new(4, vec![(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]), + vec![1i32; 6], + ) +} + +fn bidirectional_source() -> MinimumFeedbackArcSet { + MinimumFeedbackArcSet::new( + DirectedGraph::new(3, vec![(0, 1), (1, 0), (1, 2)]), + vec![1i32; 3], + ) +} + +fn weighted_cycle_source() -> MinimumFeedbackArcSet { + MinimumFeedbackArcSet::new( + DirectedGraph::new(3, vec![(0, 1), (1, 2), (2, 0)]), + vec![10i32, 1, 1], + ) +} + +#[test] +fn test_minimumfeedbackarcset_to_maximumlikelihoodranking_closed_loop() { + let source = issue_example_source(); + let reduction: ReductionFASToMLR = ReduceTo::::reduce_to(&source); + + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MinimumFeedbackArcSet -> MaximumLikelihoodRanking closed loop (issue example)", + ); +} + 
+#[test] +fn test_minimumfeedbackarcset_to_maximumlikelihoodranking_dag_closed_loop() { + let source = dag_source(); + let reduction: ReductionFASToMLR = ReduceTo::::reduce_to(&source); + + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MinimumFeedbackArcSet -> MaximumLikelihoodRanking closed loop (DAG)", + ); +} + +#[test] +fn test_reduction_matrix_matches_issue_example() { + let source = issue_example_source(); + let reduction: ReductionFASToMLR = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_items(), 5); + assert_eq!(target.comparison_count(), 0); + assert_eq!( + target.matrix(), + &vec![ + vec![0, 1, -1, 0, 1], + vec![-1, 0, 1, 0, 0], + vec![1, -1, 0, 1, -1], + vec![0, 0, -1, 0, 1], + vec![-1, 0, 1, -1, 0], + ] + ); +} + +#[test] +fn test_bidirectional_arcs_map_to_zero_entries() { + let source = bidirectional_source(); + let reduction: ReductionFASToMLR = ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.comparison_count(), 0); + assert_eq!( + target.matrix(), + &vec![vec![0, 0, 0], vec![0, 0, 1], vec![0, -1, 0]] + ); +} + +#[test] +fn test_solution_extraction_marks_backward_arcs() { + let source = issue_example_source(); + let reduction: ReductionFASToMLR = ReduceTo::::reduce_to(&source); + + let source_config = reduction.extract_solution(&[0, 1, 2, 3, 4]); + assert_eq!(source_config, vec![0, 0, 1, 0, 0, 1, 0]); +} + +#[test] +#[should_panic( + expected = "MinimumFeedbackArcSet -> MaximumLikelihoodRanking requires unit arc weights" +)] +fn test_weighted_instances_are_rejected() { + let source = weighted_cycle_source(); + let _ = ReduceTo::::reduce_to(&source); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_canonical_rule_example_spec_builds() { + let example = (canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "minimumfeedbackarcset_to_maximumlikelihoodranking") + .expect("example spec should 
be registered") + .build)(); + + assert_eq!(example.source.problem, "MinimumFeedbackArcSet"); + assert_eq!(example.target.problem, "MaximumLikelihoodRanking"); + assert_eq!(example.solutions.len(), 1); + + let source: MinimumFeedbackArcSet = + serde_json::from_value(example.source.instance.clone()) + .expect("source example deserializes"); + let target: MaximumLikelihoodRanking = serde_json::from_value(example.target.instance.clone()) + .expect("target example deserializes"); + let solution = &example.solutions[0]; + + let source_metric = source.evaluate(&solution.source_config); + let target_metric = target.evaluate(&solution.target_config); + assert!( + source_metric.is_valid(), + "source witness should be feasible" + ); + assert!( + target_metric.is_valid(), + "target witness should be feasible" + ); + + let best_source = BruteForce::new() + .find_witness(&source) + .expect("source example should have an optimum"); + let best_target = BruteForce::new() + .find_witness(&target) + .expect("target example should have an optimum"); + + assert_eq!(source_metric, source.evaluate(&best_source)); + assert_eq!(target_metric, target.evaluate(&best_target)); +} diff --git a/src/unit_tests/rules/minimumfeedbackvertexset_minimumcodegenerationunlimitedregisters.rs b/src/unit_tests/rules/minimumfeedbackvertexset_minimumcodegenerationunlimitedregisters.rs new file mode 100644 index 00000000..f8a17401 --- /dev/null +++ b/src/unit_tests/rules/minimumfeedbackvertexset_minimumcodegenerationunlimitedregisters.rs @@ -0,0 +1,17 @@ +use super::{issue_example_source, ReductionFVSToCodeGen}; +use crate::models::misc::MinimumCodeGenerationUnlimitedRegisters; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::rules::ReduceTo; + +#[test] +fn test_minimumfeedbackvertexset_to_minimumcodegenerationunlimitedregisters_closed_loop() { + let source = issue_example_source(); + let reduction: ReductionFVSToCodeGen = + ReduceTo::::reduce_to(&source); 
+ + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MinimumFeedbackVertexSet -> MinimumCodeGenerationUnlimitedRegisters closed loop", + ); +} diff --git a/src/unit_tests/rules/minimumvertexcover_minimumweightandorgraph.rs b/src/unit_tests/rules/minimumvertexcover_minimumweightandorgraph.rs new file mode 100644 index 00000000..64146871 --- /dev/null +++ b/src/unit_tests/rules/minimumvertexcover_minimumweightandorgraph.rs @@ -0,0 +1,125 @@ +#[cfg(feature = "example-db")] +use super::canonical_rule_example_specs; +use super::{issue_example_source, ReductionVCToAndOrGraph}; +use crate::models::graph::MinimumVertexCover; +use crate::models::misc::MinimumWeightAndOrGraph; +use crate::rules::test_helpers::assert_optimization_round_trip_from_optimization_target; +use crate::rules::traits::ReductionResult; +use crate::rules::ReduceTo; +#[cfg(feature = "example-db")] +use crate::solvers::BruteForce; +use crate::topology::SimpleGraph; +use crate::traits::Problem; +use crate::types::Min; + +fn weighted_path_source() -> MinimumVertexCover { + MinimumVertexCover::new(SimpleGraph::new(3, vec![(0, 1), (1, 2)]), vec![4, 1, 3]) +} + +#[test] +fn test_minimumvertexcover_to_minimumweightandorgraph_closed_loop() { + let source = issue_example_source(); + let reduction: ReductionVCToAndOrGraph = + ReduceTo::::reduce_to(&source); + + assert_optimization_round_trip_from_optimization_target( + &source, + &reduction, + "MVC -> MinimumWeightAndOrGraph closed loop", + ); +} + +#[test] +fn test_reduction_structure() { + let source = issue_example_source(); + let reduction: ReductionVCToAndOrGraph = + ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.num_vertices(), 9); + assert_eq!(target.num_arcs(), 9); + assert_eq!(target.source(), 0); + assert_eq!( + target.gate_types(), + &[ + Some(true), + Some(false), + Some(false), + Some(false), + Some(false), + Some(false), + None, + None, + None, + ] + ); + 
assert_eq!( + target.arcs(), + &[ + (0, 1), + (0, 2), + (1, 3), + (1, 4), + (2, 4), + (2, 5), + (3, 6), + (4, 7), + (5, 8), + ] + ); + assert_eq!(target.arc_weights(), &[1, 1, 1, 1, 1, 1, 1, 1, 1]); +} + +#[test] +fn test_weighted_vertices_are_charged_on_sink_arcs() { + let source = weighted_path_source(); + let reduction: ReductionVCToAndOrGraph = + ReduceTo::::reduce_to(&source); + let target = reduction.target_problem(); + + let target_solution = vec![1, 1, 0, 1, 1, 0, 0, 1, 0]; + assert_eq!(source.evaluate(&[0, 1, 0]), Min(Some(1))); + assert_eq!(target.evaluate(&target_solution), Min(Some(5))); + assert_eq!(target.arc_weights(), &[1, 1, 1, 1, 1, 1, 4, 1, 3]); + assert_eq!(reduction.extract_solution(&target_solution), vec![0, 1, 0]); +} + +#[cfg(feature = "example-db")] +#[test] +fn test_canonical_rule_example_spec_builds() { + let example = (canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "minimumvertexcover_to_minimumweightandorgraph") + .expect("example spec should be registered") + .build)(); + + assert_eq!(example.source.problem, "MinimumVertexCover"); + assert_eq!(example.target.problem, "MinimumWeightAndOrGraph"); + assert_eq!(example.solutions.len(), 1); + + let source: MinimumVertexCover = + serde_json::from_value(example.source.instance.clone()) + .expect("source example deserializes"); + let target: MinimumWeightAndOrGraph = serde_json::from_value(example.target.instance.clone()) + .expect("target example deserializes"); + let solution = &example.solutions[0]; + + assert_eq!(source.evaluate(&solution.source_config), Min(Some(1))); + assert_eq!(target.evaluate(&solution.target_config), Min(Some(5))); + + let best_source = BruteForce::new() + .find_witness(&source) + .expect("source example should have an optimum"); + let best_target = BruteForce::new() + .find_witness(&target) + .expect("target example should have an optimum"); + + assert_eq!( + source.evaluate(&solution.source_config), + source.evaluate(&best_source) + ); + 
assert_eq!( + target.evaluate(&solution.target_config), + target.evaluate(&best_target) + ); +} diff --git a/src/unit_tests/rules/partitionintocliques_minimumcoveringbycliques.rs b/src/unit_tests/rules/partitionintocliques_minimumcoveringbycliques.rs new file mode 100644 index 00000000..b1b5d983 --- /dev/null +++ b/src/unit_tests/rules/partitionintocliques_minimumcoveringbycliques.rs @@ -0,0 +1,103 @@ +use super::*; +use crate::rules::test_helpers::assert_satisfaction_round_trip_from_optimization_target; +use crate::topology::Graph; +use crate::traits::Problem; +use crate::types::{Min, Or}; + +#[test] +fn test_partitionintocliques_to_minimumcoveringbycliques_closed_loop() { + let source = PartitionIntoCliques::new(SimpleGraph::new(1, vec![]), 1); + let reduction = ReduceTo::>::reduce_to(&source); + + assert_satisfaction_round_trip_from_optimization_target( + &source, + &reduction, + "PartitionIntoCliques -> MinimumCoveringByCliques closed loop", + ); +} + +#[test] +fn test_partitionintocliques_to_minimumcoveringbycliques_orlin_example_structure() { + let source = PartitionIntoCliques::new(SimpleGraph::new(3, vec![(0, 1)]), 2); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + let layout = OrlinLayout::new(source.graph()); + + assert_eq!(target.graph().num_vertices(), 12); + assert_eq!(target.graph().num_edges(), 41); + + // Left clique on x_0, x_1, x_2, a_(0,1), a_(1,0) + assert!(target.graph().has_edge(0, 1)); + assert!(target.graph().has_edge(0, 2)); + assert!(target.graph().has_edge(1, 2)); + assert!(target.graph().has_edge(0, 6)); + assert!(target.graph().has_edge(1, 7)); + + // Right clique on y_0, y_1, y_2, b_(0,1), b_(1,0) + assert!(target.graph().has_edge(3, 4)); + assert!(target.graph().has_edge(3, 5)); + assert!(target.graph().has_edge(4, 5)); + assert!(target.graph().has_edge(3, 8)); + assert!(target.graph().has_edge(4, 9)); + + // Matching and gadget cross edges from the issue body + 
assert!(target.graph().has_edge(0, 3)); + assert!(target.graph().has_edge(1, 4)); + assert!(target.graph().has_edge(0, 4)); + assert!(target.graph().has_edge(0, 8)); + assert!(target.graph().has_edge(6, 4)); + assert!(target.graph().has_edge(6, 8)); + + let target_solution = edge_labels_from_clique_cover( + target.graph(), + &[ + vec![layout.x(0), layout.x(1), layout.y(0), layout.y(1)], + vec![layout.x(2), layout.y(2)], + vec![layout.x(0), layout.a(0), layout.b(0), layout.y(1)], + vec![layout.x(1), layout.a(1), layout.b(1), layout.y(0)], + { + let mut clique = layout.left_vertices(); + clique.push(layout.z_left()); + clique + }, + { + let mut clique = layout.right_vertices(); + clique.push(layout.z_right()); + clique + }, + ], + ); + assert_eq!(target.evaluate(&target_solution), Min(Some(6))); + assert_eq!(reduction.extract_solution(&target_solution), vec![0, 0, 1]); +} + +#[test] +fn test_partitionintocliques_to_minimumcoveringbycliques_unsat_extracts_invalid_source() { + let source = PartitionIntoCliques::new(SimpleGraph::new(2, vec![]), 1); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + let layout = OrlinLayout::new(source.graph()); + + let target_solution = edge_labels_from_clique_cover( + target.graph(), + &[ + { + let mut clique = layout.left_vertices(); + clique.push(layout.z_left()); + clique + }, + { + let mut clique = layout.right_vertices(); + clique.push(layout.z_right()); + clique + }, + vec![layout.x(0), layout.y(0)], + vec![layout.x(1), layout.y(1)], + ], + ); + assert_eq!(target.evaluate(&target_solution), Min(Some(4))); + + let extracted = reduction.extract_solution(&target_solution); + + assert_eq!(source.evaluate(&extracted), Or(false)); +} diff --git a/src/unit_tests/rules/registersufficiency_ilp.rs b/src/unit_tests/rules/registersufficiency_ilp.rs new file mode 100644 index 00000000..8b5e9ec7 --- /dev/null +++ b/src/unit_tests/rules/registersufficiency_ilp.rs @@ -0,0 +1,111 @@ +use super::*; +use 
crate::models::misc::RegisterSufficiency; +use crate::solvers::ILPSolver; +use crate::traits::Problem; +use crate::types::Or; + +fn feasible_example() -> RegisterSufficiency { + RegisterSufficiency::new(4, vec![(2, 0), (3, 1)], 2) +} + +fn infeasible_example() -> RegisterSufficiency { + RegisterSufficiency::new(4, vec![(1, 0), (2, 1), (3, 2), (3, 0)], 1) +} + +#[allow(dead_code)] +fn canonical_example() -> RegisterSufficiency { + RegisterSufficiency::new( + 7, + vec![ + (2, 0), + (2, 1), + (3, 1), + (4, 2), + (4, 3), + (5, 0), + (6, 4), + (6, 5), + ], + 3, + ) +} + +#[test] +fn test_register_sufficiency_to_ilp_structure() { + let source = feasible_example(); + let reduction = ReduceTo::>::reduce_to(&source); + let ilp = reduction.target_problem(); + + assert_eq!(ilp.num_vars, 62); + assert_eq!(ilp.constraints.len(), 180); + assert_eq!(ilp.objective, vec![]); + assert_eq!(ilp.sense, ObjectiveSense::Minimize); +} + +#[test] +fn test_register_sufficiency_to_ilp_closed_loop() { + let source = feasible_example(); + let reduction = ReduceTo::>::reduce_to(&source); + + let ilp_solution = ILPSolver::new() + .solve(reduction.target_problem()) + .expect("feasible register-sufficiency instance should yield a feasible ILP"); + let extracted = reduction.extract_solution(&ilp_solution); + + assert_eq!(source.evaluate(&extracted), Or(true)); + let mut sorted = extracted.clone(); + sorted.sort_unstable(); + assert_eq!(sorted, vec![0, 1, 2, 3]); +} + +#[test] +fn test_register_sufficiency_to_ilp_infeasible() { + let source = infeasible_example(); + let reduction = ReduceTo::>::reduce_to(&source); + + assert!( + ILPSolver::new().solve(reduction.target_problem()).is_none(), + "register-sufficiency instance with bound one should be infeasible" + ); +} + +#[test] +fn test_register_sufficiency_to_ilp_bf_vs_ilp() { + let source = feasible_example(); + let reduction = ReduceTo::>::reduce_to(&source); + crate::rules::test_helpers::assert_bf_vs_ilp(&source, &reduction); +} + +#[cfg(feature 
= "example-db")] +#[test] +fn test_register_sufficiency_to_ilp_canonical_example_spec() { + let spec = canonical_rule_example_specs() + .into_iter() + .find(|spec| spec.id == "registersufficiency_to_ilp") + .expect("missing canonical RegisterSufficiency -> ILP example spec"); + let example = (spec.build)(); + + assert_eq!(example.source.problem, "RegisterSufficiency"); + assert_eq!(example.target.problem, "ILP"); + assert_eq!(example.source.instance["num_vertices"], 7); + assert_eq!(example.source.instance["bound"], 3); + assert_eq!(example.source.instance["arcs"].as_array().unwrap().len(), 8); + assert_eq!(example.target.instance["num_vars"], 182); + assert_eq!( + example.target.instance["constraints"] + .as_array() + .unwrap() + .len(), + 542 + ); + assert_eq!(example.solutions.len(), 1); + + let source = canonical_example(); + let reduction = ReduceTo::>::reduce_to(&source); + let solution = &example.solutions[0]; + assert_eq!(source.evaluate(&solution.source_config), Or(true)); + assert_eq!( + reduction.extract_solution(&solution.target_config), + solution.source_config + ); +} diff --git a/tests/main.rs b/tests/main.rs index a1aa9fa8..6f8e4c24 100644 --- a/tests/main.rs +++ b/tests/main.rs @@ -8,10 +8,10 @@ mod integration; mod jl_parity; #[path = "suites/ksatisfiability_simultaneous_incongruences.rs"] mod ksatisfiability_simultaneous_incongruences; +#[path = "suites/reductions.rs"] +mod reductions; #[cfg(feature = "ilp-solver")] #[path = "suites/register_assignment_reductions.rs"] mod register_assignment_reductions; -#[path = "suites/reductions.rs"] -mod reductions; #[path = "suites/simultaneous_incongruences.rs"] mod simultaneous_incongruences; diff --git a/tests/suites/reductions.rs b/tests/suites/reductions.rs index 325847fa..3e164ecd 100644 --- a/tests/suites/reductions.rs +++ b/tests/suites/reductions.rs @@ -4,15 +4,13 @@ //! solutions can be properly extracted through the reduction pipeline. 
use problemreductions::models::algebraic::{LinearConstraint, ObjectiveSense, ILP}; -#[cfg(feature = "ilp-solver")] -use problemreductions::models::graph::MinimumCoveringByCliques; +use problemreductions::models::graph::{MinimumCoveringByCliques, PartitionIntoCliques}; use problemreductions::prelude::*; use problemreductions::rules::{Minimize, ReductionGraph}; #[cfg(feature = "ilp-solver")] use problemreductions::solvers::ILPSolver; use problemreductions::topology::{Graph, SimpleGraph}; -#[cfg(feature = "ilp-solver")] -use problemreductions::types::Min; +use problemreductions::types::{Min, Or}; use problemreductions::variant::{K2, K3}; /// Tests for MaximumIndependentSet <-> MinimumVertexCover reductions. @@ -324,6 +322,36 @@ mod minimum_covering_by_cliques_ilp_reductions { } } +/// Tests for PartitionIntoCliques -> MinimumCoveringByCliques reductions. +mod partition_into_cliques_covering_by_cliques_reductions { + use super::*; + + #[test] + fn test_partition_into_cliques_to_covering_by_cliques_closed_loop() { + let source = PartitionIntoCliques::new(SimpleGraph::new(1, vec![]), 1); + + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + let target_solution = BruteForce::new() + .find_witness(target) + .expect("target should be solvable"); + let extracted = reduction.extract_solution(&target_solution); + + assert_eq!(source.evaluate(&extracted), Or(true)); + } + + #[test] + fn test_partition_into_cliques_to_covering_by_cliques_orlin_issue_counts() { + let source = PartitionIntoCliques::new(SimpleGraph::new(3, vec![(0, 1)]), 2); + let reduction = ReduceTo::>::reduce_to(&source); + let target = reduction.target_problem(); + + assert_eq!(target.graph().num_vertices(), 12); + assert_eq!(target.graph().num_edges(), 41); + } +} + /// Tests for Maximum2Satisfiability -> MaxCut reductions. 
mod max2sat_maxcut_reductions { use super::*; diff --git a/tests/suites/register_assignment_reductions.rs b/tests/suites/register_assignment_reductions.rs index 35540302..a124edb0 100644 --- a/tests/suites/register_assignment_reductions.rs +++ b/tests/suites/register_assignment_reductions.rs @@ -65,7 +65,10 @@ fn test_ksat_to_fra_structure_and_closed_loop_via_ilp() { assert_eq!(fra.num_registers(), 21); let fra_path = fra_to_ilp_path(); - assert_eq!(fra_path.type_names(), vec!["FeasibleRegisterAssignment", "ILP"]); + assert_eq!( + fra_path.type_names(), + vec!["FeasibleRegisterAssignment", "ILP"] + ); let fra_chain = graph .reduce_along_path(&fra_path, fra as &dyn std::any::Any) .expect("FRA -> ILP reduction should execute"); @@ -101,7 +104,9 @@ fn test_unsatisfiable_ksat_stays_infeasible_through_fra_to_ilp() { .expect("FRA -> ILP reduction should execute"); assert!( - ILPSolver::new().solve(fra_chain.target_problem::>()).is_none(), + ILPSolver::new() + .solve(fra_chain.target_problem::>()) + .is_none(), "unsatisfiable source instance should yield an infeasible ILP" ); }