Compare commits

...

38 Commits

Author SHA1 Message Date
Scott Morrison
74a09f1ac6 Merge remote-tracking branch 'origin/master' into omega_sup 2024-02-19 10:48:41 +11:00
Scott Morrison
47f50b9427 remove existing declaration from bench/reduceMatch 2024-02-19 10:47:25 +11:00
Scott Morrison
84e189191b Merge remote-tracking branch 'origin/master' into omega_sup 2024-02-18 18:27:32 +11:00
Scott Morrison
0198b951db change path reference 2024-02-18 18:26:56 +11:00
Scott Morrison
0c0f9fae98 change namespace 2024-02-16 22:29:32 +11:00
Scott Morrison
c34534f293 fix duplicate name in test 2024-02-16 16:49:55 +11:00
Scott Morrison
8d05f8e3c0 merge master 2024-02-16 16:32:14 +11:00
Scott Morrison
2d95716c82 and the test file succeeds 2024-02-16 16:28:05 +11:00
Scott Morrison
4450287e1c Merge branch 'upstream_inequality_lemmas' into omega_sup 2024-02-16 16:10:01 +11:00
Scott Morrison
75e1dcd52c fix testss 2024-02-16 16:09:26 +11:00
Scott Morrison
a679a1fa9e fixes 2024-02-16 16:07:48 +11:00
Scott Morrison
8d5c265848 fixes 2024-02-16 16:07:20 +11:00
Scott Morrison
2f64bff876 first successful builtin run; lots of cleanup 2024-02-16 15:40:55 +11:00
Scott Morrison
a2fac88825 Merge branch 'upstream_inequality_lemmas' into omega_sup 2024-02-16 14:26:04 +11:00
Scott Morrison
aa26e92016 oops, namespace 2024-02-16 14:25:28 +11:00
Scott Morrison
a28001be5d hmm 2024-02-16 14:25:10 +11:00
Scott Morrison
0c8444fb47 hmm 2024-02-16 14:24:56 +11:00
Scott Morrison
96f683de94 Merge branch 'upstream_nat_recogniser' into omega_sup 2024-02-16 14:02:46 +11:00
Scott Morrison
2d81f260f9 Merge branch 'upstream_exfalso' into omega_sup 2024-02-16 14:02:25 +11:00
Scott Morrison
799bfc1b0c Merge branch 'upstream_false_or_by_contra' into omega_sup 2024-02-16 14:02:13 +11:00
Scott Morrison
c1932eaa2e Merge branch 'upstream_int_init' into omega_sup 2024-02-16 14:02:00 +11:00
Scott Morrison
d09149512b Merge branch 'upstream_Ordering' into omega_sup 2024-02-16 14:01:48 +11:00
Scott Morrison
36ec1739fd Merge branch 'upstream_inequality_lemmas' into omega_sup 2024-02-16 14:01:37 +11:00
Scott Morrison
1deaeeb1ae merge conflict 2024-02-16 13:59:46 +11:00
Scott Morrison
efa92c2ed4 fix tests 2024-02-16 13:55:57 +11:00
Scott Morrison
411d8664e6 fixes to tests 2024-02-16 13:51:23 +11:00
Scott Morrison
c564bb74d6 chore: copy across copyright headers 2024-02-16 13:48:06 +11:00
Scott Morrison
8bf2400016 chore: upstream basic statements about inequalities 2024-02-16 13:35:01 +11:00
Joe Hendrix
de97f2eee9 chore: add toNat' 2024-02-15 18:26:05 -08:00
Scott Morrison
9b22b3763c use builtin properly 2024-02-16 13:24:09 +11:00
Scott Morrison
9538654b32 fix builtin_tactic 2024-02-16 13:21:15 +11:00
Scott Morrison
e7cab3c032 chore: upstream Std's material on Ord and Ordering 2024-02-16 13:17:35 +11:00
Joe Hendrix
6b41061799 chore: Upstream Std.Data.Int.Init.Order and .DivMod lemmas 2024-02-15 18:14:42 -08:00
Joe Hendrix
c9f6c9b0a6 chore: upstream some basic definitions and Init.Lemmas 2024-02-15 18:14:34 -08:00
Scott Morrison
0085f3d47b chore: upstream MVarId.applyConst
.

fix namespace

.

formatting doc-string

add file
2024-02-16 13:08:55 +11:00
Scott Morrison
7a54e585a8 improve doc-strings 2024-02-16 12:47:49 +11:00
Scott Morrison
482430aa35 chore: upstream exfalso 2024-02-16 12:38:12 +11:00
Scott Morrison
4e687177e6 chore: upstream Expr.nat? and int? for recognising 'normal form' numerals 2024-02-16 12:32:17 +11:00
22 changed files with 3470 additions and 6 deletions

View File

@@ -32,3 +32,4 @@ import Init.Simproc
import Init.SizeOfLemmas
import Init.BinderPredicates
import Init.Ext
import Init.Omega

View File

@@ -889,6 +889,33 @@ def minimum? [Min α] : List α → Option α
| [] => none
| a::as => some <| as.foldl min a
/-- Inserts an element into a list without duplication:
if `a` is already present (under `BEq`), `l` is returned unchanged;
otherwise `a` is prepended. -/
@[inline] protected def insert [BEq α] (a : α) (l : List α) : List α :=
  if l.elem a then l else a :: l
/-- Bounded existential quantification over a list is decidable:
`∃ x, x ∈ l ∧ p x` is decidable when `p` is. -/
instance decidableBEx (p : α → Prop) [DecidablePred p] :
    ∀ l : List α, Decidable (Exists fun x => x ∈ l ∧ p x)
  | [] => isFalse nofun
  | x :: xs =>
    -- Either `p` holds at the head, or we recurse into the tail.
    if h₁ : p x then isTrue ⟨x, .head .., h₁⟩ else
      match decidableBEx p xs with
      | isTrue h₂ => isTrue <| let ⟨y, hm, hp⟩ := h₂; ⟨y, .tail _ hm, hp⟩
      | isFalse h₂ => isFalse fun
        | ⟨y, .tail _ h, hp⟩ => h₂ ⟨y, h, hp⟩
        | ⟨_, .head .., hp⟩ => h₁ hp
/-- Bounded universal quantification over a list is decidable:
`∀ x, x ∈ l → p x` is decidable when `p` is. -/
instance decidableBAll (p : α → Prop) [DecidablePred p] :
    ∀ l : List α, Decidable (∀ x, x ∈ l → p x)
  | [] => isTrue nofun
  | x :: xs =>
    if h₁ : p x then
      match decidableBAll p xs with
      | isTrue h₂ => isTrue fun
        | y, .tail _ h => h₂ y h
        | _, .head .. => h₁
      | isFalse h₂ => isFalse fun H => h₂ fun y hm => H y (.tail _ hm)
    else isFalse fun H => h₁ <| H x (.head ..)
instance [BEq α] [LawfulBEq α] : LawfulBEq (List α) where
eq_of_beq {as bs} := by
induction as generalizing bs with

View File

@@ -1285,6 +1285,41 @@ structure Config where
end Rewrite
namespace Omega
/-- Configures the behaviour of the `omega` tactic. -/
structure OmegaConfig where
  /--
  Split disjunctions in the context.

  Note that with `splitDisjunctions := false` omega will not be able to solve `x ≠ y` goals
  as these are usually handled by introducing `¬ x = y` as a hypothesis, then replacing this with
  `x < y ∨ x > y`.

  On the other hand, `omega` does not currently detect disjunctions which, when split,
  introduce no new useful information, so the presence of irrelevant disjunctions in the context
  can significantly increase run time.
  -/
  splitDisjunctions : Bool := true
  /--
  Whenever `((a - b : Nat) : Int)` is found, register the disjunction
  `b ≤ a ∧ ((a - b : Nat) : Int) = a - b ∨ a < b ∧ ((a - b : Nat) : Int) = 0`
  for later splitting.
  -/
  splitNatSub : Bool := true
  /--
  Whenever `Int.natAbs a` is found, register the disjunction
  `0 ≤ a ∧ Int.natAbs a = a ∨ a < 0 ∧ Int.natAbs a = - a` for later splitting.
  -/
  splitNatAbs : Bool := true
  /--
  Whenever `min a b` or `max a b` is found, rewrite in terms of the definition
  `if a ≤ b ...`, for later case splitting.
  -/
  splitMinMax : Bool := true
end Omega
end Meta
namespace Parser.Tactic

6
src/Init/Omega.lean Normal file
View File

@@ -0,0 +1,6 @@
prelude
import Init.Omega.Int
import Init.Omega.IntList
import Init.Omega.LinearCombo
import Init.Omega.Constraint
import Init.Omega.Logic

112
src/Init/Omega/Coeffs.lean Normal file
View File

@@ -0,0 +1,112 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
prelude
import Init.Omega.IntList
/-!
# `Coeffs` as a wrapper for `IntList`
Currently `omega` uses a dense representation for coefficients.
However, we can swap this out for a sparse representation.
This file sets up `Coeffs` as a type synonym for `IntList`,
and abbreviations for the functions in the `IntList` namespace which we need to use in the
`omega` algorithm.
There is an equivalent file setting up `Coeffs` as a type synonym for `AssocList Nat Int`,
currently in a private branch.
Not all the theorems about the algebraic operations on that representation have been proved yet.
When they are ready, we can replace the implementation in `omega` simply by importing
`Std.Tactic.Omega.Coeffs.IntDict` instead of `Std.Tactic.Omega.Coeffs.IntList`.
For small problems, the sparse representation is actually slightly slower,
so it is not urgent to make this replacement.
-/
namespace Lean.Omega

/-- Type synonym for `IntList := List Int`. -/
abbrev Coeffs := IntList

namespace Coeffs

/-- Identity, turning `Coeffs` into `List Int`. -/
abbrev toList (xs : Coeffs) : List Int := xs

/-- Identity, turning `List Int` into `Coeffs`. -/
abbrev ofList (xs : List Int) : Coeffs := xs

/-- Are the coefficients all zero? -/
abbrev isZero (xs : Coeffs) : Prop := ∀ x, x ∈ xs → x = 0

/-- Shim for `IntList.set`. -/
abbrev set (xs : Coeffs) (i : Nat) (y : Int) : Coeffs := IntList.set xs i y

/-- Shim for `IntList.get`. -/
abbrev get (xs : Coeffs) (i : Nat) : Int := IntList.get xs i

/-- Shim for `IntList.gcd`. -/
abbrev gcd (xs : Coeffs) : Nat := IntList.gcd xs

/-- Shim for `IntList.smul`. -/
abbrev smul (xs : Coeffs) (g : Int) : Coeffs := IntList.smul xs g

/-- Shim for `IntList.sdiv`. -/
abbrev sdiv (xs : Coeffs) (g : Int) : Coeffs := IntList.sdiv xs g

/-- Shim for `IntList.dot`. -/
abbrev dot (xs ys : Coeffs) : Int := IntList.dot xs ys

/-- Shim for `IntList.add`. -/
abbrev add (xs ys : Coeffs) : Coeffs := IntList.add xs ys

/-- Shim for `IntList.sub`. -/
abbrev sub (xs ys : Coeffs) : Coeffs := IntList.sub xs ys

/-- Shim for `IntList.neg`. -/
abbrev neg (xs : Coeffs) : Coeffs := IntList.neg xs

/-- Shim for `IntList.combo`. -/
abbrev combo (a : Int) (xs : Coeffs) (b : Int) (ys : Coeffs) : Coeffs := IntList.combo a xs b ys

/-- Shim for `List.length`. -/
abbrev length (xs : Coeffs) := List.length xs

/-- Shim for `IntList.leading`. -/
abbrev leading (xs : Coeffs) : Int := IntList.leading xs

/-- Shim for `List.map`. -/
abbrev map (f : Int → Int) (xs : Coeffs) : Coeffs := List.map f xs

/-- Shim for `.enum.find?`. -/
abbrev findIdx? (f : Int → Bool) (xs : Coeffs) : Option Nat :=
  -- List.findIdx? f xs
  -- We could avoid `Std.Data.List.Basic` by using the less efficient:
  xs.enum.find? (f ·.2) |>.map (·.1)

/-- Shim for `IntList.bmod`. -/
abbrev bmod (x : Coeffs) (m : Nat) : Coeffs := IntList.bmod x m

/-- Shim for `IntList.bmod_dot_sub_dot_bmod`. -/
abbrev bmod_dot_sub_dot_bmod (m : Nat) (a b : Coeffs) : Int :=
  IntList.bmod_dot_sub_dot_bmod m a b

theorem bmod_length (x : Coeffs) (m : Nat) : (bmod x m).length ≤ x.length :=
  IntList.bmod_length x m

theorem dvd_bmod_dot_sub_dot_bmod (m : Nat) (xs ys : Coeffs) :
    (m : Int) ∣ bmod_dot_sub_dot_bmod m xs ys := IntList.dvd_bmod_dot_sub_dot_bmod m xs ys

theorem get_of_length_le {i : Nat} {xs : Coeffs} (h : length xs ≤ i) : get xs i = 0 :=
  IntList.get_of_length_le h

theorem dot_set_left (xs ys : Coeffs) (i : Nat) (z : Int) :
    dot (set xs i z) ys = dot xs ys + (z - get xs i) * get ys i :=
  IntList.dot_set_left xs ys i z

theorem dot_sdiv_left (xs ys : Coeffs) {d : Int} (h : d ∣ xs.gcd) :
    dot (xs.sdiv d) ys = (dot xs ys) / d :=
  IntList.dot_sdiv_left xs ys h

theorem dot_smul_left (xs ys : Coeffs) (i : Int) : dot (i * xs) ys = i * dot xs ys :=
  IntList.dot_smul_left xs ys i

theorem dot_distrib_left (xs ys zs : Coeffs) : (xs + ys).dot zs = xs.dot zs + ys.dot zs :=
  IntList.dot_distrib_left xs ys zs

theorem sub_eq_add_neg (xs ys : Coeffs) : xs - ys = xs + -ys :=
  IntList.sub_eq_add_neg xs ys

theorem combo_eq_smul_add_smul (a : Int) (xs : Coeffs) (b : Int) (ys : Coeffs) :
    combo a xs b ys = (a * xs) + (b * ys) :=
  IntList.combo_eq_smul_add_smul a xs b ys

theorem gcd_dvd_dot_left (xs ys : Coeffs) : (gcd xs : Int) ∣ dot xs ys :=
  IntList.gcd_dvd_dot_left xs ys

theorem map_length {xs : Coeffs} : (xs.map f).length ≤ xs.length :=
  Nat.le_of_eq (List.length_map xs f)

theorem dot_nil_right {xs : Coeffs} : dot xs .nil = 0 := IntList.dot_nil_right

theorem get_nil : get .nil i = 0 := IntList.get_nil

theorem dot_neg_left (xs ys : IntList) : dot (-xs) ys = -dot xs ys :=
  IntList.dot_neg_left xs ys

end Coeffs

end Lean.Omega

View File

@@ -0,0 +1,395 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
prelude
import Init.Omega.LinearCombo
import Init.Omega.Int
/-!
A `Constraint` consists of an optional lower and upper bound (inclusive),
constraining a value to a set of the form `∅`, `{x}`, `[x, y]`, `[x, ∞)`, `(-∞, y]`, or `(-∞, ∞)`.
-/
namespace Lean.Omega
/-- An optional lower bound on an integer. -/
abbrev LowerBound : Type := Option Int

/-- An optional upper bound on an integer. -/
abbrev UpperBound : Type := Option Int

/-- A lower bound at `x` is satisfied at `t` if `x ≤ t`. `none` (no bound) is always satisfied. -/
abbrev LowerBound.sat (b : LowerBound) (t : Int) := b.all fun x => x ≤ t

/-- An upper bound at `y` is satisfied at `t` if `t ≤ y`. `none` (no bound) is always satisfied. -/
abbrev UpperBound.sat (b : UpperBound) (t : Int) := b.all fun y => t ≤ y
/--
A `Constraint` consists of an optional lower and upper bound (inclusive),
constraining a value to a set of the form `∅`, `{x}`, `[x, y]`, `[x, ∞)`, `(-∞, y]`, or `(-∞, ∞)`.
-/
structure Constraint where
/-- A lower bound. -/
lowerBound : LowerBound
/-- An upper bound. -/
upperBound : UpperBound
deriving BEq, DecidableEq, Repr
namespace Constraint
instance : ToString Constraint where
  toString := fun
    -- Render as the interval of integers the constraint describes.
    | ⟨none, none⟩ => "(-∞, ∞)"
    | ⟨none, some y⟩ => s!"(-∞, {y}]"
    | ⟨some x, none⟩ => s!"[{x}, ∞)"
    | ⟨some x, some y⟩ =>
      if y < x then "∅" else if x = y then s!"\{{x}}" else s!"[{x}, {y}]"
/-- A constraint is satisfied at `t` if both the lower bound and upper bound are satisfied. -/
def sat (c : Constraint) (t : Int) : Bool := c.lowerBound.sat t ∧ c.upperBound.sat t
/-- Apply a function to both the lower bound and upper bound. -/
def map (c : Constraint) (f : Int → Int) : Constraint where
  lowerBound := c.lowerBound.map f
  upperBound := c.upperBound.map f

/-- Translate a constraint. -/
def translate (c : Constraint) (t : Int) : Constraint := c.map (· + t)
theorem translate_sat : {c : Constraint} {v : Int} sat c v sat (c.translate t) (v + t) := by
rintro _ | l, _ | u v w <;> simp_all [sat, translate, map]
· exact Int.add_le_add_right w t
· exact Int.add_le_add_right w t
· rcases w with w₁, w₂; constructor
· exact Int.add_le_add_right w₁ t
· exact Int.add_le_add_right w₂ t
/--
Flip a constraint.
This operation is not useful by itself, but is used to implement `neg` and `scale`.
-/
def flip (c : Constraint) : Constraint where
lowerBound := c.upperBound
upperBound := c.lowerBound
/--
Negate a constraint. `[x, y]` becomes `[-y, -x]`.
-/
def neg (c : Constraint) : Constraint := c.flip.map (- ·)
theorem neg_sat : {c : Constraint} {v : Int} sat c v sat (c.neg) (-v) := by
rintro _ | l, _ | u v w <;> simp_all [sat, neg, flip, map]
· exact Int.neg_le_neg w
· exact Int.neg_le_neg w
· rcases w with w₁, w₂; constructor
· exact Int.neg_le_neg w₂
· exact Int.neg_le_neg w₁
/-- The trivial constraint, satisfied everywhere. -/
def trivial : Constraint := ⟨none, none⟩

/-- The impossible constraint, unsatisfiable. -/
def impossible : Constraint := ⟨some 1, some 0⟩

/-- An exact constraint. -/
def exact (r : Int) : Constraint := ⟨some r, some r⟩

@[simp] theorem trivial_say : trivial.sat t := by
  simp [sat, trivial]

@[simp] theorem exact_sat (r : Int) (t : Int) : (exact r).sat t = decide (r = t) := by
  simp only [sat, exact, Option.all_some, decide_eq_true_eq, decide_eq_decide]
  exact Int.eq_iff_le_and_ge.symm
/-- Check if a constraint is unsatisfiable. -/
def isImpossible : Constraint → Bool
  | ⟨some x, some y⟩ => y < x
  | _ => false

/-- Check if a constraint requires an exact value. -/
def isExact : Constraint → Bool
  | ⟨some x, some y⟩ => x = y
  | _ => false
theorem not_sat_of_isImpossible (h : isImpossible c) {t} : ¬ c.sat t := by
rcases c with _ | l, _ | u <;> simp [isImpossible, sat] at h
intro w
rw [Int.not_le]
exact Int.lt_of_lt_of_le h w
/--
Scale a constraint by multiplying by an integer.
* If `k = 0` this is either impossible, if the original constraint was impossible,
  or the `= 0` exact constraint.
* If `k` is positive this takes `[x, y]` to `[k * x, k * y]`
* If `k` is negative this takes `[x, y]` to `[k * y, k * x]`.
-/
def scale (k : Int) (c : Constraint) : Constraint :=
  if k = 0 then
    if c.isImpossible then c else ⟨some 0, some 0⟩
  else if 0 < k then
    c.map (k * ·)
  else
    -- Negative scaling reverses the bounds, so flip before mapping.
    c.flip.map (k * ·)
theorem scale_sat {c : Constraint} (k) (w : c.sat t) : (scale k c).sat (k * t) := by
simp [scale]
split
· split
· simp_all [not_sat_of_isImpossible]
· simp_all [sat]
· rcases c with _ | l, _ | u <;> split <;> rename_i h <;> simp_all [sat, flip, map]
· replace h := Int.le_of_lt h
exact Int.mul_le_mul_of_nonneg_left w h
· rw [Int.not_lt] at h
exact Int.mul_le_mul_of_nonpos_left h w
· replace h := Int.le_of_lt h
exact Int.mul_le_mul_of_nonneg_left w h
· rw [Int.not_lt] at h
exact Int.mul_le_mul_of_nonpos_left h w
· constructor
· exact Int.mul_le_mul_of_nonneg_left w.1 (Int.le_of_lt h)
· exact Int.mul_le_mul_of_nonneg_left w.2 (Int.le_of_lt h)
· replace h := Int.not_lt.mp h
constructor
· exact Int.mul_le_mul_of_nonpos_left h w.2
· exact Int.mul_le_mul_of_nonpos_left h w.1
/-- The sum of two constraints. `[a, b] + [c, d] = [a + c, b + d]`. -/
-- If either summand is unbounded on a side (`none`), the sum is unbounded on that side,
-- hence `bind`/`map` rather than pointwise addition.
def add (x y : Constraint) : Constraint where
lowerBound := x.lowerBound.bind fun a => y.lowerBound.map fun b => a + b
upperBound := x.upperBound.bind fun a => y.upperBound.map fun b => a + b
theorem add_sat (w₁ : c₁.sat x₁) (w₂ : c₂.sat x₂) : (add c₁ c₂).sat (x₁ + x₂) := by
rcases c₁ with _ | l₁, _ | u₁ <;> rcases c₂ with _ | l₂, _ | u₂
<;> simp [sat, LowerBound.sat, UpperBound.sat, add] at *
· exact Int.add_le_add w₁ w₂
· exact Int.add_le_add w₁ w₂.2
· exact Int.add_le_add w₁ w₂
· exact Int.add_le_add w₁ w₂.1
· exact Int.add_le_add w₁.2 w₂
· exact Int.add_le_add w₁.1 w₂
· constructor
· exact Int.add_le_add w₁.1 w₂.1
· exact Int.add_le_add w₁.2 w₂.2
/-- A linear combination of two constraints. -/
def combo (a : Int) (x : Constraint) (b : Int) (y : Constraint) : Constraint :=
add (scale a x) (scale b y)
theorem combo_sat (a) (w₁ : c₁.sat x₁) (b) (w₂ : c₂.sat x₂) :
(combo a c₁ b c₂).sat (a * x₁ + b * x₂) :=
add_sat (scale_sat a w₁) (scale_sat b w₂)
/-- The conjunction of two constraints. -/
def combine (x y : Constraint) : Constraint where
lowerBound := max x.lowerBound y.lowerBound
upperBound := min x.upperBound y.upperBound
theorem combine_sat : (c : Constraint) (c' : Constraint) (t : Int)
(c.combine c').sat t = (c.sat t c'.sat t) := by
rintro _ | l₁, _ | u₁ <;> rintro _ | l₂, _ | u₂ t
<;> simp [sat, LowerBound.sat, UpperBound.sat, combine, Int.le_min, Int.max_le] at *
· rw [And.comm]
· rw [ and_assoc, And.comm (a := l₂ t), and_assoc]
· rw [and_assoc]
· rw [and_assoc]
· rw [and_assoc, and_assoc, And.comm (a := l₂ t)]
· rw [and_assoc, and_assoc (a := l₂ t), And.comm (a := l₂ t), and_assoc, and_assoc]
/--
Dividing a constraint by a natural number, and tightened to integer bounds.
Thus the lower bound is rounded up, and the upper bound is rounded down.
-/
def div (c : Constraint) (k : Nat) : Constraint where
lowerBound := c.lowerBound.map fun x => (- ((- x) / k))
upperBound := c.upperBound.map fun y => y / k
theorem div_sat (c : Constraint) (t : Int) (k : Nat) (n : k 0) (h : (k : Int) t) (w : c.sat t) :
(c.div k).sat (t / k) := by
replace n : (k : Int) > 0 := Int.ofNat_lt.mpr (Nat.pos_of_ne_zero n)
rcases c with _ | l, _ | u
· simp_all [sat, div]
· simp [sat, div] at w
apply Int.le_of_sub_nonneg
rw [ Int.sub_ediv_of_dvd _ h, ge_iff_le, Int.div_nonneg_iff_of_pos n]
exact Int.sub_nonneg_of_le w
· simp [sat, div] at w
apply Int.le_of_sub_nonneg
rw [Int.sub_neg, Int.add_ediv_of_dvd_left h, ge_iff_le,
Int.div_nonneg_iff_of_pos n]
exact Int.sub_nonneg_of_le w
· simp [sat, div] at w
constructor
· apply Int.le_of_sub_nonneg
rw [Int.sub_neg, Int.add_ediv_of_dvd_left h, ge_iff_le,
Int.div_nonneg_iff_of_pos n]
exact Int.sub_nonneg_of_le w.1
· apply Int.le_of_sub_nonneg
rw [ Int.sub_ediv_of_dvd _ h, ge_iff_le, Int.div_nonneg_iff_of_pos n]
exact Int.sub_nonneg_of_le w.2
/--
It is convenient below to say that a constraint is satisfied at the dot product of two vectors,
so we make an abbreviation `sat'` for this.
-/
abbrev sat' (c : Constraint) (x y : Coeffs) := c.sat (Coeffs.dot x y)
theorem combine_sat' {s t : Constraint} {x y} (ws : s.sat' x y) (wt : t.sat' x y) :
(s.combine t).sat' x y := (combine_sat _ _ _).mpr ws, wt
theorem div_sat' {c : Constraint} {x y} (h : Coeffs.gcd x 0) (w : c.sat (Coeffs.dot x y)) :
(c.div (Coeffs.gcd x)).sat' (Coeffs.sdiv x (Coeffs.gcd x)) y := by
dsimp [sat']
rw [Coeffs.dot_sdiv_left _ _ (Int.dvd_refl _)]
exact div_sat c _ (Coeffs.gcd x) h (Coeffs.gcd_dvd_dot_left x y) w
theorem not_sat'_of_isImpossible (h : isImpossible c) {x y} : ¬ c.sat' x y :=
not_sat_of_isImpossible h
theorem addInequality_sat (w : c + Coeffs.dot x y 0) :
Constraint.sat' { lowerBound := some (-c), upperBound := none } x y := by
simp [Constraint.sat', Constraint.sat]
rw [ Int.zero_sub c]
exact Int.sub_left_le_of_le_add w
theorem addEquality_sat (w : c + Coeffs.dot x y = 0) :
Constraint.sat' { lowerBound := some (-c), upperBound := some (-c) } x y := by
simp [Constraint.sat', Constraint.sat]
rw [Int.eq_iff_le_and_ge] at w
rwa [Int.add_le_zero_iff_le_neg', Int.add_nonnneg_iff_neg_le', and_comm] at w
end Constraint
/--
Normalize a constraint, by dividing through by the GCD.

Return `none` if there is nothing to do, to avoid adding unnecessary steps to the proof term.
-/
def normalize? : Constraint × Coeffs → Option (Constraint × Coeffs)
  | ⟨s, x⟩ =>
    let gcd := Coeffs.gcd x -- TODO should we be caching this?
    if gcd = 0 then
      -- All coefficients are zero: the constraint is either trivially true or impossible at `0`.
      if s.sat 0 then
        some (.trivial, x)
      else
        some (.impossible, x)
    else if gcd = 1 then
      none
    else
      some (s.div gcd, Coeffs.sdiv x gcd)
/-- Normalize a constraint, by dividing through by the GCD. -/
def normalize (p : Constraint × Coeffs) : Constraint × Coeffs :=
normalize? p |>.getD p
/-- Shorthand for the first component of `normalize`. -/
-- This `noncomputable` (and others below) is a safeguard that we only use this in proofs.
noncomputable abbrev normalizeConstraint (s : Constraint) (x : Coeffs) : Constraint :=
(normalize (s, x)).1
/-- Shorthand for the second component of `normalize`. -/
noncomputable abbrev normalizeCoeffs (s : Constraint) (x : Coeffs) : Coeffs :=
(normalize (s, x)).2
theorem normalize?_eq_some (w : normalize? (s, x) = some (s', x')) :
normalizeConstraint s x = s' normalizeCoeffs s x = x' := by
simp_all [normalizeConstraint, normalizeCoeffs, normalize]
theorem normalize_sat {s x v} (w : s.sat' x v) :
(normalizeConstraint s x).sat' (normalizeCoeffs s x) v := by
dsimp [normalizeConstraint, normalizeCoeffs, normalize, normalize?]
split <;> rename_i h
· split
· simp
· dsimp [Constraint.sat'] at w
simp_all
· split
· exact w
· exact Constraint.div_sat' h w
/-- Multiply by `-1` if the leading coefficient is negative, otherwise return `none`. -/
def positivize? : Constraint × Coeffs → Option (Constraint × Coeffs)
  | ⟨s, x⟩ =>
    if 0 ≤ x.leading then
      none
    else
      some (s.neg, Coeffs.smul x (-1))
/-- Multiply by `-1` if the leading coefficient is negative, otherwise do nothing. -/
noncomputable def positivize (p : Constraint × Coeffs) : Constraint × Coeffs :=
positivize? p |>.getD p
/-- Shorthand for the first component of `positivize`. -/
noncomputable abbrev positivizeConstraint (s : Constraint) (x : Coeffs) : Constraint :=
(positivize (s, x)).1
/-- Shorthand for the second component of `positivize`. -/
noncomputable abbrev positivizeCoeffs (s : Constraint) (x : Coeffs) : Coeffs :=
(positivize (s, x)).2
theorem positivize?_eq_some (w : positivize? (s, x) = some (s', x')) :
positivizeConstraint s x = s' positivizeCoeffs s x = x' := by
simp_all [positivizeConstraint, positivizeCoeffs, positivize]
theorem positivize_sat {s x v} (w : s.sat' x v) :
(positivizeConstraint s x).sat' (positivizeCoeffs s x) v := by
dsimp [positivizeConstraint, positivizeCoeffs, positivize, positivize?]
split
· exact w
· simp [Constraint.sat']
erw [Coeffs.dot_smul_left, Int.neg_eq_neg_one_mul]
exact Constraint.neg_sat w
/-- `positivize` and `normalize`, returning `none` if neither does anything. -/
def tidy? : Constraint × Coeffs → Option (Constraint × Coeffs)
  | ⟨s, x⟩ =>
    match positivize? (s, x) with
    | none => match normalize? (s, x) with
      | none => none
      | some (s', x') => some (s', x')
    | some (s', x') => some (normalize (s', x'))
/-- `positivize` and `normalize` -/
def tidy (p : Constraint × Coeffs) : Constraint × Coeffs :=
tidy? p |>.getD p
/-- Shorthand for the first component of `tidy`. -/
abbrev tidyConstraint (s : Constraint) (x : Coeffs) : Constraint := (tidy (s, x)).1
/-- Shorthand for the second component of `tidy`. -/
abbrev tidyCoeffs (s : Constraint) (x : Coeffs) : Coeffs := (tidy (s, x)).2
theorem tidy_sat {s x v} (w : s.sat' x v) : (tidyConstraint s x).sat' (tidyCoeffs s x) v := by
dsimp [tidyConstraint, tidyCoeffs, tidy, tidy?]
split <;> rename_i hp
· split <;> rename_i hn
· simp_all
· rcases normalize?_eq_some hn with rfl, rfl
exact normalize_sat w
· rcases positivize?_eq_some hp with rfl, rfl
exact normalize_sat (positivize_sat w)
theorem combo_sat' (s t : Constraint)
(a : Int) (x : Coeffs) (b : Int) (y : Coeffs) (v : Coeffs)
(wx : s.sat' x v) (wy : t.sat' y v) :
(Constraint.combo a s b t).sat' (Coeffs.combo a x b y) v := by
rw [Constraint.sat', Coeffs.combo_eq_smul_add_smul, Coeffs.dot_distrib_left,
Coeffs.dot_smul_left, Coeffs.dot_smul_left]
exact Constraint.combo_sat a wx b wy
/-- The value of the new variable introduced when solving a hard equality. -/
abbrev bmod_div_term (m : Nat) (a b : Coeffs) : Int := Coeffs.bmod_dot_sub_dot_bmod m a b / m
/-- The coefficients of the new equation generated when solving a hard equality. -/
def bmod_coeffs (m : Nat) (i : Nat) (x : Coeffs) : Coeffs :=
Coeffs.set (Coeffs.bmod x m) i m
theorem bmod_sat (m : Nat) (r : Int) (i : Nat) (x v : Coeffs)
(h : x.length i) -- during proof reconstruction this will be by `decide`
(p : Coeffs.get v i = bmod_div_term m x v) -- and this will be by `rfl`
(w : (Constraint.exact r).sat' x v) :
(Constraint.exact (Int.bmod r m)).sat' (bmod_coeffs m i x) v := by
simp at w
simp only [p, bmod_coeffs, Constraint.exact_sat, Coeffs.dot_set_left, decide_eq_true_eq]
replace h := Nat.le_trans (Coeffs.bmod_length x m) h
rw [Coeffs.get_of_length_le h, Int.sub_zero,
Int.mul_ediv_cancel' (Coeffs.dvd_bmod_dot_sub_dot_bmod _ _ _), w,
Int.add_sub_assoc, Int.add_comm, Int.add_sub_assoc, Int.sub_self, Int.add_zero]
end Lean.Omega

174
src/Init/Omega/Int.lean Normal file
View File

@@ -0,0 +1,174 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
prelude
import Init.Data.Int.Order
/-!
# Lemmas about `Nat` and `Int` needed internally by `omega`.
These statements are useful for constructing proof expressions,
but unlikely to be widely useful, so are inside the `Lean.Omega` namespace.
If you do find a use for them, please move them into the appropriate file and namespace!
-/
namespace Lean.Omega
namespace Int
theorem ofNat_pow (a b : Nat) : ((a ^ b : Nat) : Int) = (a : Int) ^ b := by
induction b with
| zero => rfl
| succ b ih => rw [Nat.pow_succ, Int.ofNat_mul, ih]; rfl
theorem pos_pow_of_pos (a : Int) (b : Nat) (h : 0 < a) : 0 < a ^ b := by
rw [Int.eq_natAbs_of_zero_le (Int.le_of_lt h), Int.ofNat_zero, Int.ofNat_pow, Int.ofNat_lt]
exact Nat.pos_pow_of_pos _ (Int.natAbs_pos.mpr (Int.ne_of_gt h))
theorem ofNat_pos {a : Nat} : 0 < (a : Int) 0 < a := by
rw [ Int.ofNat_zero, Int.ofNat_lt]
theorem ofNat_pos_of_pos {a : Nat} (h : 0 < a) : 0 < (a : Int) :=
ofNat_pos.mpr h
theorem natCast_ofNat {x : Nat} :
@Nat.cast Int instNatCastInt (no_index (OfNat.ofNat x)) = OfNat.ofNat x := rfl
theorem ofNat_lt_of_lt {x y : Nat} (h : x < y) : (x : Int) < (y : Int) :=
Int.ofNat_lt.mpr h
theorem ofNat_le_of_le {x y : Nat} (h : x y) : (x : Int) (y : Int) :=
Int.ofNat_le.mpr h
-- FIXME these are insane:
-- Directional restatements of `Int.not_le` / `Int.not_lt`, convenient when building proof terms.
theorem lt_of_not_ge {x y : Int} (h : ¬ (x ≤ y)) : y < x := Int.not_le.mp h
theorem lt_of_not_le {x y : Int} (h : ¬ (x ≤ y)) : y < x := Int.not_le.mp h
theorem not_le_of_lt {x y : Int} (h : y < x) : ¬ (x ≤ y) := Int.not_le.mpr h
theorem lt_le_asymm {x y : Int} (h₁ : y < x) (h₂ : x ≤ y) : False := Int.not_le.mpr h₁ h₂
theorem le_lt_asymm {x y : Int} (h₁ : y ≤ x) (h₂ : x < y) : False := Int.not_lt.mpr h₁ h₂
theorem le_of_not_gt {x y : Int} (h : ¬ (y > x)) : y ≤ x := Int.not_lt.mp h
theorem not_lt_of_ge {x y : Int} (h : y ≥ x) : ¬ (y < x) := Int.not_lt.mpr h
theorem le_of_not_lt {x y : Int} (h : ¬ (x < y)) : y ≤ x := Int.not_lt.mp h
theorem not_lt_of_le {x y : Int} (h : y ≤ x) : ¬ (x < y) := Int.not_lt.mpr h
theorem add_congr {a b c d : Int} (h₁ : a = b) (h₂ : c = d) : a + c = b + d := by
subst h₁; subst h₂; rfl
theorem mul_congr {a b c d : Int} (h₁ : a = b) (h₂ : c = d) : a * c = b * d := by
subst h₁; subst h₂; rfl
theorem mul_congr_left {a b : Int} (h₁ : a = b) (c : Int) : a * c = b * c := by
subst h₁; rfl
theorem sub_congr {a b c d : Int} (h₁ : a = b) (h₂ : c = d) : a - c = b - d := by
subst h₁; subst h₂; rfl
theorem neg_congr {a b : Int} (h₁ : a = b) : -a = -b := by
subst h₁; rfl
theorem lt_of_gt {x y : Int} (h : x > y) : y < x := gt_iff_lt.mp h
theorem le_of_ge {x y : Int} (h : x y) : y x := ge_iff_le.mp h
/-- Nat subtraction truncates: when `b ≤ a` fails, `(a - b : Nat)` casts to `0 : Int`. -/
theorem ofNat_sub_eq_zero {b a : Nat} (h : ¬ b ≤ a) : ((a - b : Nat) : Int) = 0 :=
  Int.ofNat_eq_zero.mpr (Nat.sub_eq_zero_of_le (Nat.le_of_lt (Nat.not_le.mp h)))

/-- The cast of a Nat subtraction is either the Int subtraction (when `b ≤ a`) or zero. -/
theorem ofNat_sub_dichotomy {a b : Nat} :
    b ≤ a ∧ ((a - b : Nat) : Int) = a - b ∨ a < b ∧ ((a - b : Nat) : Int) = 0 := by
  by_cases h : b ≤ a
  · left
    have t := Int.ofNat_sub h
    simp at t
    exact ⟨h, t⟩
  · right
    have t := Nat.not_le.mp h
    simp [Int.ofNat_sub_eq_zero h]
    exact t
theorem ofNat_congr {a b : Nat} (h : a = b) : (a : Int) = (b : Int) := congrArg _ h
theorem ofNat_sub_sub {a b c : Nat} : ((a - b - c : Nat) : Int) = ((a - (b + c) : Nat) : Int) :=
congrArg _ (Nat.sub_sub _ _ _)
theorem ofNat_min (a b : Nat) : ((min a b : Nat) : Int) = min (a : Int) (b : Int) := by
simp only [Nat.min_def, Int.min_def, Int.ofNat_le]
split <;> rfl
theorem ofNat_max (a b : Nat) : ((max a b : Nat) : Int) = max (a : Int) (b : Int) := by
simp only [Nat.max_def, Int.max_def, Int.ofNat_le]
split <;> rfl
theorem ofNat_natAbs (a : Int) : (a.natAbs : Int) = if 0 a then a else -a := by
rw [Int.natAbs]
split <;> rename_i n
· simp only [Int.ofNat_eq_coe]
rw [if_pos (Int.ofNat_nonneg n)]
· simp; rfl
/-- `natAbs` (cast back to `Int`) is `a` when `a` is nonneg, and `-a` otherwise. -/
theorem natAbs_dichotomy {a : Int} : 0 ≤ a ∧ a.natAbs = a ∨ a < 0 ∧ a.natAbs = -a := by
  by_cases h : 0 ≤ a
  · left
    simp_all [Int.natAbs_of_nonneg]
  · right
    rw [Int.not_le] at h
    rw [Int.ofNat_natAbs_of_nonpos (Int.le_of_lt h)]
    simp_all
theorem neg_le_natAbs {a : Int} : -a a.natAbs := by
have t := Int.le_natAbs (a := -a)
simp at t
exact t
theorem add_le_iff_le_sub (a b c : Int) : a + b c a c - b := by
conv =>
lhs
rw [ Int.add_zero c, Int.sub_self (-b), Int.sub_eq_add_neg, Int.add_assoc, Int.neg_neg,
Int.add_le_add_iff_right]
theorem le_add_iff_sub_le (a b c : Int) : a b + c a - c b := by
conv =>
lhs
rw [ Int.neg_neg c, Int.sub_eq_add_neg, add_le_iff_le_sub]
theorem add_le_zero_iff_le_neg (a b : Int) : a + b 0 a - b := by
rw [add_le_iff_le_sub, Int.zero_sub]
theorem add_le_zero_iff_le_neg' (a b : Int) : a + b 0 b -a := by
rw [Int.add_comm, add_le_zero_iff_le_neg]
theorem add_nonnneg_iff_neg_le (a b : Int) : 0 a + b -b a := by
rw [le_add_iff_sub_le, Int.zero_sub]
theorem add_nonnneg_iff_neg_le' (a b : Int) : 0 a + b -a b := by
rw [Int.add_comm, add_nonnneg_iff_neg_le]
theorem ofNat_fst_mk {β} {x : Nat} {y : β} : (Prod.mk x y).fst = (x : Int) := rfl
theorem ofNat_snd_mk {α} {x : α} {y : Nat} : (Prod.mk x y).snd = (y : Int) := rfl
end Int
namespace Nat
theorem lt_of_gt {x y : Nat} (h : x > y) : y < x := gt_iff_lt.mp h
theorem le_of_ge {x y : Nat} (h : x y) : y x := ge_iff_le.mp h
end Nat
namespace Prod
theorem of_lex (w : Prod.Lex r s p q) : r p.fst q.fst p.fst = q.fst s p.snd q.snd :=
(Prod.lex_def r s).mp w
theorem of_not_lex {α} {r : α α Prop} [DecidableEq α] {β} {s : β β Prop}
{p q : α × β} (w : ¬ Prod.Lex r s p q) :
¬ r p.fst q.fst (p.fst q.fst ¬ s p.snd q.snd) := by
rw [Prod.lex_def, not_or, Decidable.not_and_iff_or_not_not] at w
exact w
theorem fst_mk : (Prod.mk x y).fst = x := rfl
theorem snd_mk : (Prod.mk x y).snd = y := rfl
end Prod
end Lean.Omega

412
src/Init/Omega/IntList.lean Normal file
View File

@@ -0,0 +1,412 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
prelude
import Init.Data.List.Lemmas
namespace Lean.Omega
/--
A type synonym for `List Int`, used by `omega` for dense representation of coefficients.
We define algebraic operations,
interpreting `List Int` as a finitely supported function `Nat → Int`.
-/
abbrev IntList := List Int
namespace IntList
/-- Get the `i`-th element (interpreted as `0` if the list is not long enough). -/
def get (xs : IntList) (i : Nat) : Int := (xs.get? i).getD 0

@[simp] theorem get_nil : get ([] : IntList) i = 0 := rfl
@[simp] theorem get_cons_zero : get (x :: xs) 0 = x := rfl
@[simp] theorem get_cons_succ : get (x :: xs) (i+1) = get xs i := rfl

theorem get_map {xs : IntList} (h : f 0 = 0) : get (xs.map f) i = f (xs.get i) := by
  simp only [get, List.get?_map]
  cases xs.get? i <;> simp_all

theorem get_of_length_le {xs : IntList} (h : xs.length ≤ i) : xs.get i = 0 := by
  rw [get, List.get?_eq_none.mpr h]
  rfl

-- theorem lt_length_of_get_nonzero {xs : IntList} (h : xs.get i ≠ 0) : i < xs.length := by
--   revert h
--   simpa using mt get_of_length_le
/-- Like `List.set`, but right-pad with zeroes as necessary first. -/
def set (xs : IntList) (i : Nat) (y : Int) : IntList :=
  match xs, i with
  | [], 0 => [y]
  | [], (i+1) => 0 :: set [] i y
  | _ :: xs, 0 => y :: xs
  | x :: xs, (i+1) => x :: set xs i y

@[simp] theorem set_nil_zero : set [] 0 y = [y] := rfl
@[simp] theorem set_nil_succ : set [] (i+1) y = 0 :: set [] i y := rfl
@[simp] theorem set_cons_zero : set (x :: xs) 0 y = y :: xs := rfl
@[simp] theorem set_cons_succ : set (x :: xs) (i+1) y = x :: set xs i y := rfl

/-- Returns the leading coefficient, i.e. the first non-zero entry. -/
def leading (xs : IntList) : Int := xs.find? (! · == 0) |>.getD 0

/-- Implementation of `+` on `IntList`. -/
def add (xs ys : IntList) : IntList :=
  List.zipWithAll (fun x y => x.getD 0 + y.getD 0) xs ys

instance : Add IntList := ⟨add⟩

theorem add_def (xs ys : IntList) :
    xs + ys = List.zipWithAll (fun x y => x.getD 0 + y.getD 0) xs ys :=
  rfl

@[simp] theorem add_get (xs ys : IntList) (i : Nat) : (xs + ys).get i = xs.get i + ys.get i := by
  simp only [add_def, get, List.zipWithAll_get?, List.get?_eq_none]
  cases xs.get? i <;> cases ys.get? i <;> simp

@[simp] theorem add_nil (xs : IntList) : xs + [] = xs := by simp [add_def]
@[simp] theorem nil_add (xs : IntList) : [] + xs = xs := by simp [add_def]
@[simp] theorem cons_add_cons (x) (xs : IntList) (y) (ys : IntList) :
    (x :: xs) + (y :: ys) = (x + y) :: (xs + ys) := by simp [add_def]
/-- Implementation of `*` on `IntList`. -/
def mul (xs ys : IntList) : IntList := List.zipWith (· * ·) xs ys

instance : Mul IntList := ⟨mul⟩

theorem mul_def (xs ys : IntList) : xs * ys = List.zipWith (· * ·) xs ys :=
  rfl

@[simp] theorem mul_get (xs ys : IntList) (i : Nat) : (xs * ys).get i = xs.get i * ys.get i := by
  simp only [mul_def, get, List.zipWith_get?]
  cases xs.get? i <;> cases ys.get? i <;> simp

@[simp] theorem mul_nil_left : ([] : IntList) * ys = [] := rfl
@[simp] theorem mul_nil_right : xs * ([] : IntList) = [] := List.zipWith_nil_right
@[simp] theorem mul_cons₂ : (x::xs : IntList) * (y::ys) = (x * y) :: (xs * ys) := rfl

/-- Implementation of negation on `IntList`. -/
def neg (xs : IntList) : IntList := xs.map fun x => -x

instance : Neg IntList := ⟨neg⟩

theorem neg_def (xs : IntList) : - xs = xs.map fun x => -x := rfl

@[simp] theorem neg_get (xs : IntList) (i : Nat) : (- xs).get i = - xs.get i := by
  simp only [neg_def, get, List.get?_map]
  cases xs.get? i <;> simp

@[simp] theorem neg_nil : (- ([] : IntList)) = [] := rfl
@[simp] theorem neg_cons : (- (x::xs : IntList)) = -x :: -xs := rfl

/-- Implementation of subtraction on `IntList`. -/
def sub (xs ys : IntList) : IntList :=
  List.zipWithAll (fun x y => x.getD 0 - y.getD 0) xs ys

instance : Sub IntList := ⟨sub⟩

theorem sub_def (xs ys : IntList) :
    xs - ys = List.zipWithAll (fun x y => x.getD 0 - y.getD 0) xs ys :=
  rfl

/-- Implementation of scalar multiplication by an integer on `IntList`. -/
def smul (xs : IntList) (i : Int) : IntList :=
  xs.map fun x => i * x

instance : HMul Int IntList IntList where
  hMul i xs := xs.smul i

theorem smul_def (xs : IntList) (i : Int) : i * xs = xs.map fun x => i * x := rfl

@[simp] theorem smul_get (xs : IntList) (a : Int) (i : Nat) : (a * xs).get i = a * xs.get i := by
  simp only [smul_def, get, List.get?_map]
  cases xs.get? i <;> simp

@[simp] theorem smul_nil {i : Int} : i * ([] : IntList) = [] := rfl
@[simp] theorem smul_cons {i : Int} : i * (x::xs : IntList) = i * x :: i * xs := rfl
/-- A linear combination of two `IntList`s. -/
def combo (a : Int) (xs : IntList) (b : Int) (ys : IntList) : IntList :=
  List.zipWithAll (fun x y => a * x.getD 0 + b * y.getD 0) xs ys

theorem combo_eq_smul_add_smul (a : Int) (xs : IntList) (b : Int) (ys : IntList) :
    combo a xs b ys = a * xs + b * ys := by
  dsimp [combo]
  induction xs generalizing ys with
  | nil => simp; rfl
  | cons x xs ih =>
    cases ys with
    | nil => simp; rfl
    | cons y ys => simp_all

attribute [local simp] add_def mul_def in
theorem mul_distrib_left (xs ys zs : IntList) : (xs + ys) * zs = xs * zs + ys * zs := by
  induction xs generalizing ys zs with
  | nil =>
    cases ys with
    | nil => simp
    | cons _ _ =>
      cases zs with
      | nil => simp
      | cons _ _ => simp_all [Int.add_mul]
  | cons x xs ih₁ =>
    cases ys with
    | nil => simp_all
    | cons _ _ =>
      cases zs with
      | nil => simp
      | cons _ _ => simp_all [Int.add_mul]

theorem mul_neg_left (xs ys : IntList) : (-xs) * ys = -(xs * ys) := by
  induction xs generalizing ys with
  | nil => simp
  | cons x xs ih =>
    cases ys with
    | nil => simp
    | cons y ys => simp_all [Int.neg_mul]

attribute [local simp] add_def neg_def sub_def in
theorem sub_eq_add_neg (xs ys : IntList) : xs - ys = xs + (-ys) := by
  induction xs generalizing ys with
  | nil => simp; rfl
  | cons x xs ih =>
    cases ys with
    | nil => simp
    | cons y ys => simp_all [Int.sub_eq_add_neg]

@[simp] theorem mul_smul_left {i : Int} {xs ys : IntList} : (i * xs) * ys = i * (xs * ys) := by
  induction xs generalizing ys with
  | nil => simp
  | cons x xs ih =>
    cases ys with
    | nil => simp
    | cons y ys => simp_all [Int.mul_assoc]
/-- The sum of the entries of an `IntList`. -/
def sum (xs : IntList) : Int := xs.foldr (· + ·) 0

@[simp] theorem sum_nil : sum ([] : IntList) = 0 := rfl
@[simp] theorem sum_cons : sum (x::xs : IntList) = x + sum xs := rfl

attribute [local simp] sum add_def in
theorem sum_add (xs ys : IntList) : (xs + ys).sum = xs.sum + ys.sum := by
  induction xs generalizing ys with
  | nil => simp
  | cons x xs ih =>
    cases ys with
    | nil => simp
    | cons y ys => simp_all [Int.add_assoc, Int.add_left_comm]

@[simp]
theorem sum_neg (xs : IntList) : (-xs).sum = -(xs.sum) := by
  induction xs with
  | nil => simp
  | cons x xs ih => simp_all [Int.neg_add]

@[simp]
theorem sum_smul (i : Int) (xs : IntList) : (i * xs).sum = i * (xs.sum) := by
  induction xs with
  | nil => simp
  | cons x xs ih => simp_all [Int.mul_add]

/-- The dot product of two `IntList`s. -/
def dot (xs ys : IntList) : Int := (xs * ys).sum

-- The dot product ignores excess entries on either side (they pair with an implicit zero):
example : IntList.dot [a, b, c] [x, y, z] = IntList.dot [a, b, c, d] [x, y, z] := rfl
example : IntList.dot [a, b, c] [x, y, z] = IntList.dot [a, b, c] [x, y, z, w] := rfl
@[local simp] theorem dot_nil_left : dot ([] : IntList) ys = 0 := rfl
@[simp] theorem dot_nil_right : dot xs ([] : IntList) = 0 := by simp [dot]
@[simp] theorem dot_cons₂ : dot (x::xs) (y::ys) = x * y + dot xs ys := rfl

-- theorem dot_comm (xs ys : IntList) : dot xs ys = dot ys xs := by
--   rw [dot, dot, mul_comm]

@[simp] theorem dot_set_left (xs ys : IntList) (i : Nat) (z : Int) :
    dot (xs.set i z) ys = dot xs ys + (z - xs.get i) * ys.get i := by
  induction xs generalizing i ys with
  | nil =>
    induction i generalizing ys with
    | zero => cases ys <;> simp
    | succ i => cases ys <;> simp_all
  | cons x xs ih =>
    induction i generalizing ys with
    | zero =>
      cases ys with
      | nil => simp
      | cons y ys =>
        simp only [Nat.zero_eq, set_cons_zero, dot_cons₂, get_cons_zero, Int.sub_mul]
        rw [Int.add_right_comm, Int.add_comm (x * y), Int.sub_add_cancel]
    | succ i =>
      cases ys with
      | nil => simp
      | cons y ys => simp_all [Int.add_assoc]

theorem dot_distrib_left (xs ys zs : IntList) : (xs + ys).dot zs = xs.dot zs + ys.dot zs := by
  simp [dot, mul_distrib_left, sum_add]

@[simp] theorem dot_neg_left (xs ys : IntList) : (-xs).dot ys = -(xs.dot ys) := by
  simp [dot, mul_neg_left]

@[simp] theorem dot_smul_left (xs ys : IntList) (i : Int) : (i * xs).dot ys = i * xs.dot ys := by
  simp [dot]

theorem dot_of_left_zero (w : ∀ x, x ∈ xs → x = 0) : dot xs ys = 0 := by
  induction xs generalizing ys with
  | nil => simp
  | cons x xs ih =>
    cases ys with
    | nil => simp
    | cons y ys =>
      rw [dot_cons₂, w x (by simp), ih]
      · simp
      · intro x m
        apply w
        exact List.mem_cons_of_mem _ m
/-- Division of an `IntList` by an integer. -/
def sdiv (xs : IntList) (g : Int) : IntList := xs.map fun x => x / g

@[simp] theorem sdiv_nil : sdiv [] g = [] := rfl
@[simp] theorem sdiv_cons : sdiv (x::xs) g = (x / g) :: sdiv xs g := rfl

/-- The gcd of the absolute values of the entries of an `IntList`. -/
def gcd (xs : IntList) : Nat := xs.foldr (fun x g => Nat.gcd x.natAbs g) 0

@[simp] theorem gcd_nil : gcd [] = 0 := rfl
@[simp] theorem gcd_cons : gcd (x :: xs) = Nat.gcd x.natAbs (gcd xs) := rfl

theorem gcd_cons_div_left : (gcd (x::xs) : Int) ∣ x := by
  simp only [gcd, List.foldr_cons, Int.ofNat_dvd_left]
  apply Nat.gcd_dvd_left

theorem gcd_cons_div_right : gcd (x::xs) ∣ gcd xs := by
  simp only [gcd, List.foldr_cons]
  apply Nat.gcd_dvd_right

theorem gcd_cons_div_right' : (gcd (x::xs) : Int) ∣ (gcd xs : Int) := by
  rw [Int.ofNat_dvd_left, Int.natAbs_ofNat]
  exact gcd_cons_div_right
theorem gcd_dvd (xs : IntList) {a : Int} (m : a ∈ xs) : (xs.gcd : Int) ∣ a := by
  rw [Int.ofNat_dvd_left]
  induction m with
  | head =>
    simp only [gcd_cons]
    apply Nat.gcd_dvd_left
  | tail b m ih => -- FIXME: why is the argument of tail implicit?
    simp only [gcd_cons]
    exact Nat.dvd_trans (Nat.gcd_dvd_right _ _) ih

theorem dvd_gcd (xs : IntList) (c : Nat) (w : ∀ {a : Int}, a ∈ xs → (c : Int) ∣ a) :
    c ∣ xs.gcd := by
  simp only [Int.ofNat_dvd_left] at w
  induction xs with
  | nil => have := Nat.dvd_zero c; simp at this; exact this
  | cons x xs ih =>
    simp
    apply Nat.dvd_gcd
    · apply w
      simp
    · apply ih
      intro b m
      apply w
      exact List.mem_cons_of_mem x m

theorem gcd_eq_iff (xs : IntList) (g : Nat) :
    xs.gcd = g ↔
      (∀ {a : Int}, a ∈ xs → (g : Int) ∣ a) ∧
        (∀ (c : Nat), (∀ {a : Int}, a ∈ xs → (c : Int) ∣ a) → c ∣ g) := by
  constructor
  · rintro rfl
    exact ⟨gcd_dvd _, dvd_gcd _⟩
  · rintro ⟨hi, hg⟩
    apply Nat.dvd_antisymm
    · apply hg
      intro i m
      exact gcd_dvd xs m
    · exact dvd_gcd xs g hi

attribute [simp] Int.zero_dvd

@[simp] theorem gcd_eq_zero (xs : IntList) : xs.gcd = 0 ↔ ∀ x, x ∈ xs → x = 0 := by
  simp [gcd_eq_iff, Nat.dvd_zero]
@[simp] theorem dot_mod_gcd_left (xs ys : IntList) : dot xs ys % xs.gcd = 0 := by
  induction xs generalizing ys with
  | nil => simp
  | cons x xs ih =>
    cases ys with
    | nil => simp
    | cons y ys =>
      rw [dot_cons₂, Int.add_emod,
        Int.emod_emod_of_dvd (x * y) (gcd_cons_div_left),
        Int.emod_emod_of_dvd (dot xs ys) (Int.ofNat_dvd.mpr gcd_cons_div_right)]
      simp_all

theorem gcd_dvd_dot_left (xs ys : IntList) : (xs.gcd : Int) ∣ dot xs ys :=
  Int.dvd_of_emod_eq_zero (dot_mod_gcd_left xs ys)

@[simp]
theorem dot_eq_zero_of_left_eq_zero {xs ys : IntList} (h : ∀ x, x ∈ xs → x = 0) :
    dot xs ys = 0 := by
  induction xs generalizing ys with
  | nil => rfl
  | cons x xs ih =>
    cases ys with
    | nil => rfl
    | cons y ys =>
      rw [dot_cons₂, h x (List.mem_cons_self _ _), ih (fun x m => h x (List.mem_cons_of_mem _ m)),
        Int.zero_mul, Int.add_zero]

theorem dot_sdiv_left (xs ys : IntList) {d : Int} (h : d ∣ xs.gcd) :
    dot (xs.sdiv d) ys = (dot xs ys) / d := by
  induction xs generalizing ys with
  | nil => simp
  | cons x xs ih =>
    cases ys with
    | nil => simp
    | cons y ys =>
      have wx : d ∣ x := Int.dvd_trans h (gcd_cons_div_left)
      have wxy : d ∣ x * y := Int.dvd_trans wx (Int.dvd_mul_right x y)
      have w : d ∣ (IntList.gcd xs : Int) := Int.dvd_trans h (gcd_cons_div_right')
      simp_all [Int.add_ediv_of_dvd_left, Int.mul_ediv_assoc']
/-- Apply "balanced mod" to each entry in an `IntList`. -/
abbrev bmod (x : IntList) (m : Nat) : IntList := x.map (Int.bmod · m)

theorem bmod_length (x : IntList) (m) : (bmod x m).length ≤ x.length :=
  Nat.le_of_eq (List.length_map _ _)

/--
The difference between the balanced mod of a dot product,
and the dot product with balanced mod applied to each entry of the left factor.
-/
abbrev bmod_dot_sub_dot_bmod (m : Nat) (a b : IntList) : Int :=
  (Int.bmod (dot a b) m) - dot (bmod a m) b

theorem dvd_bmod_dot_sub_dot_bmod (m : Nat) (xs ys : IntList) :
    (m : Int) ∣ bmod_dot_sub_dot_bmod m xs ys := by
  dsimp [bmod_dot_sub_dot_bmod]
  rw [Int.dvd_iff_emod_eq_zero]
  induction xs generalizing ys with
  | nil => simp
  | cons x xs ih =>
    cases ys with
    | nil => simp
    | cons y ys =>
      simp only [IntList.dot_cons₂, List.map_cons]
      specialize ih ys
      rw [Int.sub_emod, Int.bmod_emod] at ih
      rw [Int.sub_emod, Int.bmod_emod, Int.add_emod, Int.add_emod (Int.bmod x m * y),
        Int.sub_emod, Int.sub_sub, Int.sub_eq_add_neg, Int.sub_eq_add_neg,
        Int.add_assoc (x * y % m), Int.add_comm (IntList.dot _ _ % m), Int.add_assoc,
        Int.add_assoc, Int.sub_eq_add_neg, Int.sub_eq_add_neg, Int.add_emod, ih, Int.add_zero,
        Int.emod_emod, Int.mul_emod, Int.mul_emod (Int.bmod x m), Int.bmod_emod, Int.sub_self,
        Int.zero_emod]
end IntList
end Lean.Omega

View File

@@ -0,0 +1,178 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
prelude
import Init.Omega.Coeffs
/-!
# Linear combinations
We use this data structure while processing hypotheses.
-/
namespace Lean.Omega
/-- Internal representation of a linear combination of atoms, and a constant term. -/
structure LinearCombo where
  /-- Constant term. -/
  const : Int := 0
  /-- Coefficients of the atoms. -/
  coeffs : Coeffs := []
  deriving DecidableEq, Repr

namespace LinearCombo

instance : ToString LinearCombo where
  toString lc :=
    s!"{lc.const}{String.join <| lc.coeffs.toList.enum.map fun ⟨i, c⟩ => s!" + {c} * x{i+1}"}"

instance : Inhabited LinearCombo := ⟨{const := 1}⟩

theorem ext {a b : LinearCombo} (w₁ : a.const = b.const) (w₂ : a.coeffs = b.coeffs) :
    a = b := by
  cases a; cases b
  subst w₁; subst w₂
  congr
/--
Evaluate a linear combination `⟨r, [c_1, …, c_k]⟩` at values `[v_1, …, v_k]` to obtain
`r + (c_1 * x_1 + (c_2 * x_2 + ... (c_k * x_k + 0))))`.
-/
def eval (lc : LinearCombo) (values : Coeffs) : Int :=
  lc.const + lc.coeffs.dot values

@[simp] theorem eval_nil : (lc : LinearCombo).eval .nil = lc.const := by
  simp [eval]

/-- The `i`-th coordinate function. -/
def coordinate (i : Nat) : LinearCombo where
  const := 0
  coeffs := Coeffs.set .nil i 1

@[simp] theorem coordinate_eval (i : Nat) (v : Coeffs) :
    (coordinate i).eval v = v.get i := by
  simp [eval, coordinate]

theorem coordinate_eval_0 : (coordinate 0).eval (.ofList (a0 :: t)) = a0 := by simp
theorem coordinate_eval_1 : (coordinate 1).eval (.ofList (a0 :: a1 :: t)) = a1 := by simp
theorem coordinate_eval_2 : (coordinate 2).eval (.ofList (a0 :: a1 :: a2 :: t)) = a2 := by simp
theorem coordinate_eval_3 :
    (coordinate 3).eval (.ofList (a0 :: a1 :: a2 :: a3 :: t)) = a3 := by simp
theorem coordinate_eval_4 :
    (coordinate 4).eval (.ofList (a0 :: a1 :: a2 :: a3 :: a4 :: t)) = a4 := by simp
theorem coordinate_eval_5 :
    (coordinate 5).eval (.ofList (a0 :: a1 :: a2 :: a3 :: a4 :: a5 :: t)) = a5 := by simp
theorem coordinate_eval_6 :
    (coordinate 6).eval (.ofList (a0 :: a1 :: a2 :: a3 :: a4 :: a5 :: a6 :: t)) = a6 := by simp
theorem coordinate_eval_7 :
    (coordinate 7).eval
      (.ofList (a0 :: a1 :: a2 :: a3 :: a4 :: a5 :: a6 :: a7 :: t)) = a7 := by simp
theorem coordinate_eval_8 :
    (coordinate 8).eval
      (.ofList (a0 :: a1 :: a2 :: a3 :: a4 :: a5 :: a6 :: a7 :: a8 :: t)) = a8 := by simp
theorem coordinate_eval_9 :
    (coordinate 9).eval
      (.ofList (a0 :: a1 :: a2 :: a3 :: a4 :: a5 :: a6 :: a7 :: a8 :: a9 :: t)) = a9 := by simp
/-- Implementation of addition on `LinearCombo`. -/
def add (l₁ l₂ : LinearCombo) : LinearCombo where
  const := l₁.const + l₂.const
  coeffs := l₁.coeffs + l₂.coeffs

instance : Add LinearCombo := ⟨add⟩

@[simp] theorem add_const {l₁ l₂ : LinearCombo} : (l₁ + l₂).const = l₁.const + l₂.const := rfl
@[simp] theorem add_coeffs {l₁ l₂ : LinearCombo} : (l₁ + l₂).coeffs = l₁.coeffs + l₂.coeffs := rfl

/-- Implementation of subtraction on `LinearCombo`. -/
def sub (l₁ l₂ : LinearCombo) : LinearCombo where
  const := l₁.const - l₂.const
  coeffs := l₁.coeffs - l₂.coeffs

instance : Sub LinearCombo := ⟨sub⟩

@[simp] theorem sub_const {l₁ l₂ : LinearCombo} : (l₁ - l₂).const = l₁.const - l₂.const := rfl
@[simp] theorem sub_coeffs {l₁ l₂ : LinearCombo} : (l₁ - l₂).coeffs = l₁.coeffs - l₂.coeffs := rfl

/-- Implementation of negation on `LinearCombo`. -/
def neg (lc : LinearCombo) : LinearCombo where
  const := -lc.const
  coeffs := -lc.coeffs

instance : Neg LinearCombo := ⟨neg⟩

@[simp] theorem neg_const {l : LinearCombo} : (-l).const = -l.const := rfl
@[simp] theorem neg_coeffs {l : LinearCombo} : (-l).coeffs = -l.coeffs := rfl

theorem sub_eq_add_neg (l₁ l₂ : LinearCombo) : l₁ - l₂ = l₁ + -l₂ := by
  rcases l₁ with ⟨a₁, c₁⟩; rcases l₂ with ⟨a₂, c₂⟩
  apply ext
  · simp [Int.sub_eq_add_neg]
  · simp [Coeffs.sub_eq_add_neg]

/-- Implementation of scalar multiplication of a `LinearCombo` by an `Int`. -/
def smul (lc : LinearCombo) (i : Int) : LinearCombo where
  const := i * lc.const
  coeffs := lc.coeffs.smul i

instance : HMul Int LinearCombo LinearCombo := ⟨fun i lc => lc.smul i⟩

@[simp] theorem smul_const {lc : LinearCombo} {i : Int} : (i * lc).const = i * lc.const := rfl
@[simp] theorem smul_coeffs {lc : LinearCombo} {i : Int} : (i * lc).coeffs = i * lc.coeffs := rfl
@[simp] theorem add_eval (l₁ l₂ : LinearCombo) (v : Coeffs) :
    (l₁ + l₂).eval v = l₁.eval v + l₂.eval v := by
  rcases l₁ with ⟨r₁, c₁⟩; rcases l₂ with ⟨r₂, c₂⟩
  simp only [eval, add_const, add_coeffs, Int.add_assoc, Int.add_left_comm]
  congr
  exact Coeffs.dot_distrib_left c₁ c₂ v

@[simp] theorem neg_eval (lc : LinearCombo) (v : Coeffs) : (-lc).eval v = - lc.eval v := by
  rcases lc with ⟨a, coeffs⟩
  simp [eval, Int.neg_add]

@[simp] theorem sub_eval (l₁ l₂ : LinearCombo) (v : Coeffs) :
    (l₁ - l₂).eval v = l₁.eval v - l₂.eval v := by
  simp [sub_eq_add_neg, Int.sub_eq_add_neg]

@[simp] theorem smul_eval (lc : LinearCombo) (i : Int) (v : Coeffs) :
    (i * lc).eval v = i * lc.eval v := by
  rcases lc with ⟨a, coeffs⟩
  simp [eval, Int.mul_add]

theorem smul_eval_comm (lc : LinearCombo) (i : Int) (v : Coeffs) :
    (i * lc).eval v = lc.eval v * i := by
  simp [Int.mul_comm]
/--
Multiplication of two linear combinations.
This is useful only if at least one of the linear combinations is constant,
and otherwise should be considered as a junk value.
-/
def mul (l₁ l₂ : LinearCombo) : LinearCombo :=
  l₂.const * l₁ + l₁.const * l₂ - { const := l₁.const * l₂.const }

theorem mul_eval_of_const_left (l₁ l₂ : LinearCombo) (v : Coeffs) (w : l₁.coeffs.isZero) :
    (mul l₁ l₂).eval v = l₁.eval v * l₂.eval v := by
  have : Coeffs.dot l₁.coeffs v = 0 := IntList.dot_of_left_zero w
  simp [mul, eval, this, Coeffs.sub_eq_add_neg, Coeffs.dot_distrib_left, Int.add_mul, Int.mul_add,
    Int.mul_comm]

theorem mul_eval_of_const_right (l₁ l₂ : LinearCombo) (v : Coeffs) (w : l₂.coeffs.isZero) :
    (mul l₁ l₂).eval v = l₁.eval v * l₂.eval v := by
  have : Coeffs.dot l₂.coeffs v = 0 := IntList.dot_of_left_zero w
  simp [mul, eval, this, Coeffs.sub_eq_add_neg, Coeffs.dot_distrib_left, Int.add_mul, Int.mul_add,
    Int.mul_comm]

theorem mul_eval (l₁ l₂ : LinearCombo) (v : Coeffs) (w : l₁.coeffs.isZero ∨ l₂.coeffs.isZero) :
    (mul l₁ l₂).eval v = l₁.eval v * l₂.eval v := by
  rcases w with w | w
  · rw [mul_eval_of_const_left _ _ _ w]
  · rw [mul_eval_of_const_right _ _ _ w]
end LinearCombo
end Lean.Omega

50
src/Init/Omega/Logic.lean Normal file
View File

@@ -0,0 +1,50 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
prelude
import Init.PropLemmas
/-!
# Specializations of basic logic lemmas
These are useful for `omega` while constructing proofs, but not considered generally useful
so are hidden in the `Std.Tactic.Omega` namespace.
If you find yourself needing them elsewhere, please move them first to another file.
-/
namespace Lean.Omega
theorem and_not_not_of_not_or (h : ¬ (p ∨ q)) : ¬ p ∧ ¬ q := not_or.mp h

theorem Decidable.or_not_not_of_not_and [Decidable p] [Decidable q]
    (h : ¬ (p ∧ q)) : ¬ p ∨ ¬ q :=
  (Decidable.not_and_iff_or_not _ _).mp h

theorem Decidable.and_or_not_and_not_of_iff {p q : Prop} [Decidable q] (h : p ↔ q) :
    (p ∧ q) ∨ (¬p ∧ ¬q) := Decidable.iff_iff_and_or_not_and_not.mp h

theorem Decidable.not_iff_iff_and_not_or_not_and [Decidable a] [Decidable b] :
    (¬ (a ↔ b)) ↔ (a ∧ ¬ b) ∨ ((¬ a) ∧ b) :=
  ⟨fun e => if hb : b then
    .inr ⟨fun ha => e ⟨fun _ => hb, fun _ => ha⟩, hb⟩
  else
    .inl ⟨if ha : a then ha else
      False.elim (e ⟨fun ha' => absurd ha' ha, fun hb' => absurd hb' hb⟩), hb⟩,
  Or.rec (And.rec fun ha nb w => nb (w.mp ha)) (And.rec fun na hb w => na (w.mpr hb))⟩

theorem Decidable.and_not_or_not_and_of_not_iff [Decidable a] [Decidable b]
    (h : ¬ (a ↔ b)) : a ∧ ¬b ∨ ¬a ∧ b :=
  Decidable.not_iff_iff_and_not_or_not_and.mp h

theorem Decidable.and_not_of_not_imp [Decidable a] (h : ¬(a → b)) : a ∧ ¬b :=
  Decidable.not_imp_iff_and_not.mp h

theorem ite_disjunction {α : Type u} {P : Prop} [Decidable P] {a b : α} :
    (P ∧ (if P then a else b) = a) ∨ (¬ P ∧ (if P then a else b) = b) :=
  if h : P then
    .inl ⟨h, if_pos h⟩
  else
    .inr ⟨h, if_neg h⟩
end Lean.Omega

View File

@@ -972,6 +972,40 @@ macro "haveI" d:haveDecl : tactic => `(tactic| refine_lift haveI $d:haveDecl; ?_
/-- `letI` behaves like `let`, but inlines the value instead of producing a `let_fun` term. -/
macro "letI" d:haveDecl : tactic => `(tactic| refine_lift letI $d:haveDecl; ?_)
/--
The `omega` tactic, for resolving integer and natural linear arithmetic problems.

It is not yet a full decision procedure (no "dark" or "grey" shadows),
but should be effective on many problems.

We handle hypotheses of the form `x = y`, `x < y`, `x ≤ y`, and `k ∣ x` for `x y` in `Nat` or `Int`
(and `k` a literal), along with negations of these statements.

We decompose the sides of the inequalities as linear combinations of atoms.

If we encounter `x / k` or `x % k` for literal integers `k` we introduce new auxiliary variables
and the relevant inequalities.

On the first pass, we do not perform case splits on natural subtraction.
If `omega` fails, we recursively perform a case split on
a natural subtraction appearing in a hypothesis, and try again.

The options
```
omega (config :=
  { splitDisjunctions := true, splitNatSub := true, splitNatAbs := true, splitMinMax := true })
```
can be used to:
* `splitDisjunctions`: split any disjunctions found in the context,
  if the problem is not otherwise solvable.
* `splitNatSub`: for each appearance of `((a - b : Nat) : Int)`, split on `a ≤ b` if necessary.
* `splitNatAbs`: for each appearance of `Int.natAbs a`, split on `0 ≤ a` if necessary.
* `splitMinMax`: for each occurrence of `min a b`, split on `min a b = a ∨ min a b = b`
Currently, all of these are on by default.
-/
syntax (name := omega) "omega" (config)? : tactic
end Tactic
namespace Attr

View File

@@ -30,3 +30,4 @@ import Lean.Elab.Tactic.Repeat
import Lean.Elab.Tactic.Ext
import Lean.Elab.Tactic.Change
import Lean.Elab.Tactic.FalseOrByContra
import Lean.Elab.Tactic.Omega

View File

@@ -382,6 +382,10 @@ then set the new goals to be the resulting goal list.-/
else
replaceMainGoal []
/-- Analogue of `liftMetaTactic` for tactics that do not return any goals. -/
@[inline] def liftMetaFinishingTactic (tac : MVarId → MetaM Unit) : TacticM Unit :=
  liftMetaTactic fun g => do tac g; pure []
def tryTactic? (tactic : TacticM α) : TacticM (Option α) := do
try
pure (some ( tactic))

View File

@@ -0,0 +1,192 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Lean.Elab.Tactic.Omega.Frontend
/-!
# `omega`
This is an implementation of the `omega` algorithm, currently without "dark" and "grey" shadows,
although the framework should be compatible with adding that part of the algorithm later.
The implementation closely follows William Pugh's
"The omega test: a fast and practical integer programming algorithm for dependence analysis"
https://doi.org/10.1145/125826.125848.
The `MetaM` level `omega` tactic takes a `List Expr` of facts,
and tries to discharge the goal by proving `False`.
The user-facing `omega` tactic first calls `false_or_by_contra`, and then invokes the `omega` tactic
on all hypotheses.
### Pre-processing
In the `false_or_by_contra` step, we:
* if the goal is `False`, do nothing,
* if the goal is `¬ P`, introduce `P`,
* if the goal is `x ≠ y`, introduce `x = y`,
* otherwise, for a goal `P`, replace it with `¬ ¬ P` and introduce `¬ P`.
The `omega` tactic pre-processes the hypotheses in the following ways:
* Replace `x > y` for `x y : Nat` or `x y : Int` with `x ≥ y + 1`.
* Given `x ≥ y` for `x : Nat`, replace it with `(x : Int) ≥ (y : Int)`.
* Push `Nat`-to-`Int` coercions inwards across `+`, `*`, `/`, `%`.
* Replace `k ∣ x` for a literal `k : Nat` with `x % k = 0`,
  and replace `¬ k ∣ x` with `x % k > 0`.
* If `x / m` appears, for some `x : Int` and literal `m : Nat`,
replace `x / m` with a new variable `α` and add the constraints
`0 ≤ - m * α + x ≤ m - 1`.
* If `x % m` appears, similarly, introduce the same new constraints
but replace `x % m` with `- m * α + x`.
* Split conjunctions, existentials, and subtypes.
* Record, for each appearance of `(a - b : Int)` with `a b : Nat`, the disjunction
  `b ≤ a ∧ ((a - b : Nat) : Int) = a - b ∨ a < b ∧ ((a - b : Nat) : Int) = 0`.
We don't immediately split this; see the discussion below for how disjunctions are handled.
After this preprocessing stage, we have a collection of linear inequalities
(all using `≤` rather than `<`) and equalities in some set of atoms.
TODO: We should identify atoms up to associativity and commutativity,
so that `omega` can handle problems such as `a * b < 0 && b * a > 0 → False`.
This should be a relatively easy modification of the `lookup` function in `OmegaM`.
After doing so, we could allow the preprocessor to distribute multiplication over addition.
### Normalization
Throughout the remainder of the algorithm, we apply the following normalization steps to
all linear constraints:
* Make the leading coefficient positive (thus giving us both upper and lower bounds).
* Divide through by the GCD of the coefficients, rounding the constant terms appropriately.
* Whenever we have both an upper and lower bound for the same coefficients,
check they are compatible. If they are tight, mark the pair of constraints as an equality.
If they are inconsistent, stop further processing.
### Solving equalities
The next step is to solve all equalities.
We first solve any equalities that have a `±1` coefficient;
these allow us to eliminate that variable.
After this, there may be equalities remaining with all coefficients having absolute value greater
than one. We select an equality `c₀ + ∑ cᵢ * xᵢ = 0` with smallest minimal absolute value
of the `cᵢ`, breaking ties by preferring equalities with smallest maximal absolute value.
We let `m = |cⱼ| + 1` where `cⱼ` is the coefficient with smallest absolute value.
We then add the new equality `(bmod c₀ m) + ∑ (bmod cᵢ m) xᵢ = m α` with `α` being a new variable.
Here `bmod` is "balanced mod", taking values in `[- m/2, (m - 1)/2]`.
This equality holds (for some value of `α`) because the left hand side differs from the left hand
side of the original equality by a multiple of `m`.
Moreover, in this equality the coefficient of `xⱼ` is `±1`, so we can solve and eliminate `xⱼ`.
So far this isn't progress: we've introduced a new variable and eliminated a variable.
However, this process terminates, as the pair `(c, C)` lexicographically decreases,
where `c` is the smallest minimal absolute value and `C` is the smallest maximal absolute value
amongst those equalities with minimal absolute value `c`.
(Happily because we're running this in metaprogramming code, we don't actually need to prove this
termination! If we later want to upgrade to a decision procedure, or to produce counterexamples
we would need to do this. It's certainly possible, and existed in an earlier prototype version.)
### Solving inequalities
After solving all equalities, we turn to the inequalities.
We need to select a variable to eliminate; this choice is discussed below.
#### Shadows
The omega algorithm indicates we should consider three subproblems,
called the "real", "dark", and "grey" shadows.
(In fact the "grey" shadow is a disjunction of potentially many problems.)
Our problem is satisfiable if and only if the real shadow is satisfiable
and either the dark or grey shadow is satisfiable.
Currently we do not implement either the dark or grey shadows, and thus if the real shadow is
satisfiable we must fail, and report that we couldn't find a contradiction, even though the
problem may be unsatisfiable.
In practical problems, it appears to be relatively rare that we fail because of not handling the
dark and grey shadows.
Fortunately, in many cases it is possible to choose a variable to eliminate such that
the real and dark shadows coincide, and the grey shadows are empty. In this situation
we don't lose anything by ignoring the dark and grey shadows.
We call this situation an exact elimination.
A sufficient condition for exactness is that either all upper bounds on `xᵢ` have unit coefficient,
or all lower bounds on `xᵢ` have unit coefficient.
We always prefer to select the value of `i` so that this condition holds, if possible.
We break ties by preferring to select a value of `i` that minimizes the number of new constraints
introduced in the real shadow.
#### The real shadow: Fourier-Motzkin elimination
The real shadow for a variable `i` is just the Fourier-Motzkin elimination.
We begin by discarding all inequalities involving the variable `i`.
Then, for each pair of constraints `f ≤ c * xᵢ` and `c' * xᵢ ≤ f'`
with both `c` and `c'` positive (i.e. for each pair of an lower and upper bound on `xᵢ`)
we introduce the new constraint `c * f' - c' * f ≥ 0`.
(Note that if there are only upper bounds on `xᵢ`, or only lower bounds on `xᵢ` this step
simply discards inequalities.)
#### The dark and grey shadows
For each such new constraint `c' * f - c * f' ≤ 0`, we either have the strengthening
`c * f' - c' * f ≥ c * c' - c - c' + 1`
or we do not, i.e.
`c * f' - c' * f ≤ c * c' - c - c'`.
In the latter case, combining this inequality with `f' ≥ c' * xᵢ` we obtain
`c' * (c * xᵢ - f) ≤ c * c' - c - c'`
and as we already have `c * xᵢ - f ≥ 0`,
we conclude that `c * xᵢ - f = j` for some `j = 0, 1, ..., (c * c' - c - c')/c'`
(with, as usual the division rounded down).
Note that the largest possible value of `j` occurs with `c'` is as large as possible.
Thus the "dark" shadow is the variant of the real shadow where we replace each new inequality
with its strengthening.
The "grey" shadows are the union of the problems obtained by taking
a lower bound `f ≤ c * xᵢ` for `xᵢ` and some `j = 0, 1, ..., (c * m - c - m)/m`, where `m`
is the largest coefficient `c'` appearing in an upper bound `c' * xᵢ ≤ f'` for `xᵢ`,
and adding to the original problem (i.e. without doing any Fourier-Motzkin elimination) the single
new equation `c * xᵢ - f = j`, and the inequalities
`c * xᵢ - f > (c * m - c - m)/m` for each previously considered lower bound.
As stated above, the satisfiability of the original problem is in fact equivalent to
the satisfiability of the real shadow, *and* the satisfiability of *either* the dark shadow,
or at least one of the grey shadows.
TODO: implement the dark and grey shadows!
### Disjunctions
The omega algorithm as described above accumulates various disjunctions,
either coming from natural subtraction, or from the dark and grey shadows.
When we encounter such a disjunction, we store it in a list of disjunctions,
but do not immediately split it.
Instead we first try to find a contradiction (i.e. by eliminating equalities and inequalities)
without the disjunctive hypothesis.
If this fails, we then retrieve the first disjunction from the list, split it,
and try to find a contradiction in both branches.
(Note that we make no attempt to optimize the order in which we split disjunctions:
it's currently on a first in first out basis.)
The work done eliminating equalities can be reused when splitting disjunctions,
but we need to redo all the work eliminating inequalities in each branch.
## Future work
* Implementation of dark and grey shadows.
* Identification of atoms up to associativity and commutativity of monomials.
* Further optimization.
* Some data is recomputed unnecessarily, e.g. the GCDs of coefficients.
* Sparse representation of coefficients.
* I have a branch in which this is implemented, modulo some proofs about algebraic operations
on sparse arrays.
* Case splitting on `Int.abs`?
-/

View File

@@ -0,0 +1,565 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Lean.Elab.Tactic.Omega.OmegaM
import Lean.Elab.Tactic.Omega.MinNatAbs
open Lean (HashMap HashSet)
namespace Lean.Elab.Tactic.Omega
initialize Lean.registerTraceClass `omega
open Lean (Expr)
open Lean Meta Omega
open Lean in
/-- Reflect a `LinearCombo` back into an `Expr`, by applying `LinearCombo.mk`
to the reflected constant term and coefficient list. -/
instance : ToExpr LinearCombo where
  toExpr lc :=
    (Expr.const ``LinearCombo.mk []).app (toExpr lc.const) |>.app (toExpr lc.coeffs)
  toTypeExpr := .const ``LinearCombo []
open Lean in
/-- Reflect a `Constraint` back into an `Expr`, by applying `Constraint.mk`
to the reflected lower and upper bounds. -/
instance : ToExpr Constraint where
  toExpr s :=
    (Expr.const ``Constraint.mk []).app (toExpr s.lowerBound) |>.app (toExpr s.upperBound)
  toTypeExpr := .const ``Constraint []
/--
A delayed proof that a constraint is satisfied at the atoms.
(Stored as an `OmegaM` computation so the `Expr` is only synthesized if a
contradiction is actually found.)
-/
abbrev Proof : Type := OmegaM Expr
/--
Our internal representation of an argument "justifying" that a constraint holds on some coefficients.
We'll use this to construct the proof term once a contradiction is found.
-/
-- NOTE: the extraction stripped the `→` arrows from the type signature below;
-- they are restored here so the declaration parses.
inductive Justification : Constraint → Coeffs → Type
  /--
  `Problem.assumptions[i]` generates a proof that `s.sat' coeffs atoms`
  -/
  | assumption (s : Constraint) (x : Coeffs) (i : Nat) : Justification s x
  /-- The result of `tidy` on another `Justification`. -/
  | tidy (j : Justification s c) : Justification (tidyConstraint s c) (tidyCoeffs s c)
  /-- The result of `combine` on two `Justifications`. -/
  | combine {s t c} (j : Justification s c) (k : Justification t c) : Justification (s.combine t) c
  /-- A linear `combo` of two `Justifications`. -/
  | combo {s t x y} (a : Int) (j : Justification s x) (b : Int) (k : Justification t y) :
      Justification (Constraint.combo a s b t) (Coeffs.combo a x b y)
  /--
  The justification for the constraint constructed using "balanced mod" while
  eliminating an equality.
  -/
  | bmod (m : Nat) (r : Int) (i : Nat) {x} (j : Justification (.exact r) x) :
      Justification (.exact (Int.bmod r m)) (bmod_coeffs m i x)
/-- Wrapping for `Justification.tidy` when `tidy?` is nonempty. -/
nonrec def Justification.tidy? (j : Justification s c) : Option (Σ s' c', Justification s' c') :=
  match tidy? (s, c) with
  -- The anonymous constructor brackets were stripped by extraction; restored here.
  | some _ => some ⟨_, _, tidy j⟩
  | none => none
namespace Justification

/-- Prefix a string with a bullet, indenting any embedded newlines so nested
justifications render as an indented tree. (The `"• "` literal and the
two-space indent were lost to HTML whitespace/character stripping.) -/
private def bullet (s : String) := "• " ++ s.replace "\n" "\n  "
/-- Print a `Justification` as an indented tree structure. -/
def toString : Justification s x → String
  | assumption _ _ i => s!"{x} ∈ {s}: assumption {i}"
  | @tidy s' x' j =>
    -- If tidying was a no-op, don't clutter the output with a "tidying up" node.
    if s == s' && x == x' then j.toString else s!"{x} ∈ {s}: tidying up:\n" ++ bullet j.toString
  | combine j k =>
    s!"{x} ∈ {s}: combination of:\n" ++ bullet j.toString ++ "\n" ++ bullet k.toString
  | combo a j b k =>
    s!"{x} ∈ {s}: {a} * x + {b} * y combo of:\n" ++ bullet j.toString ++ "\n" ++ bullet k.toString
  | bmod m _ i j =>
    s!"{x} ∈ {s}: bmod with m={m} and i={i} of:\n" ++ bullet j.toString

instance : ToString (Justification s x) where toString := toString
open Lean

/-- Construct the proof term associated to a `tidy` step. -/
def tidyProof (s : Constraint) (x : Coeffs) (v : Expr) (prf : Expr) : Expr :=
  mkApp4 (.const ``tidy_sat []) (toExpr s) (toExpr x) v prf

/-- Construct the proof term associated to a `combine` step. -/
def combineProof (s t : Constraint) (x : Coeffs) (v : Expr) (ps pt : Expr) : Expr :=
  mkApp6 (.const ``Constraint.combine_sat' []) (toExpr s) (toExpr t) (toExpr x) v ps pt

/-- Construct the proof term associated to a `combo` step. -/
def comboProof (s t : Constraint) (a : Int) (x : Coeffs) (b : Int) (y : Coeffs)
    (v : Expr) (px py : Expr) : Expr :=
  mkApp9 (.const ``combo_sat' []) (toExpr s) (toExpr t) (toExpr a) (toExpr x) (toExpr b) (toExpr y)
    v px py
/-- Construct the proof term associated to a `bmod` step. -/
def bmodProof (m : Nat) (r : Int) (i : Nat) (x : Coeffs) (v : Expr) (w : Expr) : MetaM Expr := do
  let m := toExpr m
  let r := toExpr r
  let i := toExpr i
  let x := toExpr x
  -- `h : Coeffs.length x ≤ i`, discharged by `decide`.
  let h ← mkDecideProof (mkApp4 (.const ``LE.le [.zero]) (.const ``Nat []) (.const ``instLENat [])
    (.app (.const ``Coeffs.length []) x) i)
  let lhs := mkApp2 (.const ``Coeffs.get []) v i
  let rhs := mkApp3 (.const ``bmod_div_term []) m x v
  -- `p : Coeffs.get v i = bmod_div_term m x v`, by `rfl` at the expected type.
  let p ← mkEqReflWithExpectedType lhs rhs
  return mkApp8 (.const ``bmod_sat []) m r i x v h p w
-- TODO could we increase sharing in the proof term here?
/-- Constructs a proof that `s.sat' c v = true` -/
def proof (v : Expr) (assumptions : Array Proof) : Justification s c → Proof
  | assumption s c i => assumptions[i]!
  | @tidy s c j => return tidyProof s c v (← proof v assumptions j)
  | @combine s t c j k =>
    return combineProof s t c v (← proof v assumptions j) (← proof v assumptions k)
  | @combo s t x y a j b k =>
    return comboProof s t a x b y v (← proof v assumptions j) (← proof v assumptions k)
  | @bmod m r i x j => do bmodProof m r i x v (← proof v assumptions j)
end Justification
/-- A `Justification` bundled together with its parameters. -/
structure Fact where
  /-- The coefficients of a constraint. -/
  coeffs : Coeffs
  /-- The constraint. -/
  constraint : Constraint
  /-- The justification of a derived fact. -/
  justification : Justification constraint coeffs

namespace Fact

/-- A `Fact` prints as its justification tree. -/
instance : ToString Fact where
  toString f := toString f.justification
/-- `tidy`, implemented on `Fact`. -/
def tidy (f : Fact) : Fact :=
  match f.justification.tidy? with
  -- The `⟨…⟩` brackets were stripped by extraction; restored here.
  -- The coefficients and constraint are inferred from the justification's type.
  | some ⟨_, _, justification⟩ => { justification }
  | none => f
/-- `combo`, implemented on `Fact`: forms `a * f + b * g`.
The coefficients and constraint fields are inferred from the justification. -/
def combo (a : Int) (f : Fact) (b : Int) (g : Fact) : Fact :=
  { justification := .combo a f.justification b g.justification }
end Fact
/--
A `omega` problem.
This is a hybrid structure:
it contains both `Expr` objects giving proofs of the "ground" assumptions
(or rather continuations which will produce the proofs when needed)
and internal representations of the linear constraints that we manipulate in the algorithm.
While the algorithm is running we do not synthesize any new `Expr` proofs: proof extraction happens
only once we've found a contradiction.
-/
structure Problem where
  /-- The ground assumptions that the algorithm starts from. -/
  assumptions : Array Proof := #[]
  /-- The number of variables in the problem. -/
  numVars : Nat := 0
  /-- The current constraints, indexed by their coefficients. -/
  constraints : HashMap Coeffs Fact := ∅
  /--
  The coefficients for which `constraints` contains an exact constraint (i.e. an equality).
  -/
  equalities : HashSet Coeffs := ∅
  /--
  Equations that have already been used to eliminate variables,
  along with the variable which was removed, and its coefficient (either `1` or `-1`).
  The earlier elements are more recent,
  so if these are being reapplied it is essential to use `List.foldr`.
  -/
  eliminations : List (Fact × Nat × Int) := []
  /-- Whether the problem is possible. -/
  possible : Bool := true
  /-- If the problem is impossible, then `proveFalse?` will contain a proof of `False`. -/
  proveFalse? : Option Proof := none
  /-- Invariant between `possible` and `proveFalse?`. -/
  proveFalse?_spec : possible || proveFalse?.isSome := by rfl
  /--
  If we have found a contradiction,
  `explanation?` will contain a human readable account of the derivation.
  -/
  explanation? : Thunk String := ""
namespace Problem

/-- Check if a problem has no constraints. -/
def isEmpty (p : Problem) : Bool := p.constraints.isEmpty
/-- Render a `Problem` as one `coeffs ∈ constraint` line per constraint,
or `"trivial"` / `"impossible"` in the degenerate cases. -/
instance : ToString Problem where
  toString p :=
    if p.possible then
      if p.isEmpty then
        "trivial"
      else
        "\n".intercalate <|
          -- Pattern brackets restored (stripped by extraction):
          -- each entry of `toList` is `(coeffs, ⟨_, cst, _⟩)`.
          (p.constraints.toList.map fun ⟨coeffs, _, cst, _⟩ => s!"{coeffs} ∈ {cst}")
    else
      "impossible"
open Lean in
/--
Takes a proof that `s.sat' x v` for some `s` such that `s.isImpossible`,
and constructs a proof of `False`.
-/
def proveFalse {s x} (j : Justification s x) (assumptions : Array Proof) : Proof := do
  let v := ← atomsCoeffs
  let prf ← j.proof v assumptions
  let x := toExpr x
  let s := toExpr s
  -- `impossible : Constraint.isImpossible s = true`, discharged by `decide`.
  let impossible ←
    mkDecideProof (← mkEq (mkApp (.const ``Constraint.isImpossible []) s) (.const ``true []))
  return mkApp5 (.const ``Constraint.not_sat'_of_isImpossible []) s impossible x v prf
/--
Insert a constraint into the problem,
without checking if there is already a constraint for these coefficients.
-/
def insertConstraint (p : Problem) : Fact → Problem
  | f@⟨x, s, j⟩ =>
    if s.isImpossible then
      -- An impossible constraint means the whole problem is contradictory:
      -- record the (delayed) proof of `False` and a human-readable explanation.
      { p with
        possible := false
        proveFalse? := some (proveFalse j p.assumptions)
        explanation? := Thunk.mk fun _ => j.toString
        proveFalse?_spec := rfl }
    else
      { p with
        numVars := max p.numVars x.length
        constraints := p.constraints.insert x f
        proveFalse?_spec := p.proveFalse?_spec
        equalities :=
          if f.constraint.isExact then
            p.equalities.insert x
          else
            p.equalities }
/--
Add a constraint into the problem,
combining it with any existing constraints for the same coefficients.
-/
def addConstraint (p : Problem) : Fact → Problem
  | f@⟨x, s, j⟩ =>
    if p.possible then
      match p.constraints.find? x with
      | none =>
        match s with
        | .trivial => p
        | _ => p.insertConstraint f
      | some ⟨x', t, k⟩ =>
        if h : x = x' then
          let r := s.combine t
          if r = t then
            -- No need to overwrite the existing fact
            -- with the same fact with a more complicated justification
            p
          else
            if r = s then
              -- The new constraint is strictly stronger, no need to combine with the old one:
              p.insertConstraint ⟨x, s, j⟩
            else
              -- `h ▸ k` transports the stored justification along `x = x'`.
              p.insertConstraint ⟨x, s.combine t, j.combine (h ▸ k)⟩
        else
          p -- unreachable
    else
      p
/--
Walk through the equalities, finding either the first equality with minimal coefficient `±1`,
or otherwise the equality with minimal `(r.minNatAbs, r.maxNatAbs)` (ordered lexicographically).
Returns the coefficients of the equality, along with the value of `minNatAbs`.
Although we don't need to run a termination proof here, it's nevertheless important that we use this
ordering so the algorithm terminates in practice!
-/
def selectEquality (p : Problem) : Option (Coeffs × Nat) :=
  p.equalities.fold (init := none) fun
  | none, c => (c, c.minNatAbs)
  | some (r, m), c =>
    -- Once we have a candidate with `minNatAbs = 1` (an "easy" equality) we keep it;
    -- only candidates with `2 ≤ m` can be improved upon. (The `≤` was stripped by extraction.)
    if 2 ≤ m then
      let m' := c.minNatAbs
      if (m' < m || m' = m && c.maxNatAbs < r.maxNatAbs) then
        (c, m')
      else
        (r, m)
    else
      (r, m)
/--
If we have already solved some equalities, apply those to some new `Fact`.
-/
def replayEliminations (p : Problem) (f : Fact) : Fact :=
  -- `eliminations` stores the most recent elimination first, so `foldr`
  -- replays them in their original order of discovery.
  p.eliminations.foldr (init := f) fun (f, i, s) g =>
    match Coeffs.get g.coeffs i with
    | 0 => g -- variable `i` does not occur in `g`; nothing to do
    | y => Fact.combo (-1 * s * y) f 1 g
/--
Solve an "easy" equality, i.e. one with a coefficient that is `±1`.
After solving, the variable will have been eliminated from all constraints.
-/
def solveEasyEquality (p : Problem) (c : Coeffs) : Problem :=
  let i := c.findIdx? (·.natAbs = 1) |>.getD 0 -- findIdx? is always some
  let sign := c.get i |> Int.sign
  match p.constraints.find? c with
  | some f =>
    -- Rebuild the problem from scratch, recording this elimination so it can be
    -- replayed on facts added later.
    let init :=
      { assumptions := p.assumptions
        eliminations := (f, i, sign) :: p.eliminations }
    p.constraints.fold (init := init) fun p' coeffs g =>
      match Coeffs.get coeffs i with
      | 0 =>
        -- Variable `i` does not occur: keep the constraint unchanged.
        p'.addConstraint g
      | ci =>
        -- Cancel variable `i` by adding `k` copies of the equality.
        let k := -1 * sign * ci
        p'.addConstraint (Fact.combo k f 1 g).tidy
  | _ => p -- unreachable
open Lean in
/--
We deal with a hard equality by introducing a new easy equality.
After solving the easy equality,
the minimum lexicographic value of `(c.minNatAbs, c.maxNatAbs)` will have been reduced.
-/
def dealWithHardEquality (p : Problem) (c : Coeffs) : OmegaM Problem :=
  match p.constraints.find? c with
  -- Pattern brackets restored: an exact constraint has both bounds `some`.
  | some ⟨_, some r, some r', j⟩ => do
    let m := c.minNatAbs + 1
    -- We have to store the valid value of the newly introduced variable in the atoms.
    let x := mkApp3 (.const ``bmod_div_term []) (toExpr m) (toExpr c) (← atomsCoeffs)
    let (i, facts?) ← lookup x
    if hr : r' = r then
      match facts? with
      | none => throwError "When solving hard equality, new atom had been seen before!"
      | some facts => if ! facts.isEmpty then
          throwError "When solving hard equality, there were unexpected new facts!"
      -- `hr ▸ j` turns the justification for bounds `(r, r')` into one for `.exact r`.
      return p.addConstraint { coeffs := _, constraint := _, justification := (hr ▸ j).bmod m r i }
    else
      throwError "Invalid constraint, expected an equation." -- unreachable
  | _ =>
    return p -- unreachable
/--
Solve an equality, by deciding whether it is easy (has a `±1` coefficient) or hard,
and delegating to the appropriate function.
-/
def solveEquality (p : Problem) (c : Coeffs) (m : Nat) : OmegaM Problem :=
  -- `m` is the `minNatAbs` of `c`, as computed by `selectEquality`.
  if m = 1 then
    return p.solveEasyEquality c
  else
    p.dealWithHardEquality c
/-- Recursively solve all equalities. -/
partial def solveEqualities (p : Problem) : OmegaM Problem :=
  if p.possible then
    match p.selectEquality with
    | some (c, m) => do (← p.solveEquality c m).solveEqualities
    | none => return p
  else return p
open Constraint

open Lean in
/-- Constructing the proof term for `addInequality`. -/
def addInequality_proof (c : Int) (x : Coeffs) (p : Proof) : Proof := do
  return mkApp4 (.const ``addInequality_sat []) (toExpr c) (toExpr x) (← atomsCoeffs) (← p)
open Lean in
/-- Constructing the proof term for `addEquality`. -/
def addEquality_proof (c : Int) (x : Coeffs) (p : Proof) : Proof := do
  return mkApp4 (.const ``addEquality_sat []) (toExpr c) (toExpr x) (← atomsCoeffs) (← p)
/--
Helper function for adding an inequality of the form `const + Coeffs.dot coeffs atoms ≥ 0`
to a `Problem`.
(This is only used while initializing a `Problem`. During elimination we use `addConstraint`.)
-/
-- We are given `prf? : const + Coeffs.dot coeffs atoms ≥ 0`,
-- and need to transform this to `Coeffs.dot coeffs atoms ≥ -const`.
def addInequality (p : Problem) (const : Int) (coeffs : Coeffs) (prf? : Option Proof) : Problem :=
  -- If no proof is supplied, fall back to `sorry` (used for testing only).
  let prf := prf?.getD (do mkSorry (← mkFreshExprMVar none) false)
  let i := p.assumptions.size
  let p' := { p with assumptions := p.assumptions.push (addInequality_proof const coeffs prf) }
  let f : Fact :=
    { coeffs
      constraint := { lowerBound := some (-const), upperBound := none }
      justification := .assumption _ _ i }
  let f := p.replayEliminations f
  let f := f.tidy
  p'.addConstraint f
/--
Helper function for adding an equality of the form `const + Coeffs.dot coeffs atoms = 0`
to a `Problem`.
(This is only used while initializing a `Problem`. During elimination we use `addConstraint`.)
-/
def addEquality (p : Problem) (const : Int) (coeffs : Coeffs) (prf? : Option Proof) : Problem :=
  -- If no proof is supplied, fall back to `sorry` (used for testing only).
  let prf := prf?.getD (do mkSorry (← mkFreshExprMVar none) false)
  let i := p.assumptions.size
  let p' := { p with assumptions := p.assumptions.push (addEquality_proof const coeffs prf) }
  let f : Fact :=
    { coeffs
      constraint := { lowerBound := some (-const), upperBound := some (-const) }
      justification := .assumption _ _ i }
  let f := p.replayEliminations f
  let f := f.tidy
  p'.addConstraint f
/-- Folding `addInequality` over a list. -/
def addInequalities (p : Problem) (ineqs : List (Int × Coeffs × Option Proof)) : Problem :=
  ineqs.foldl (init := p) fun p ⟨const, coeffs, prf?⟩ => p.addInequality const coeffs prf?
/-- Folding `addEquality` over a list. -/
def addEqualities (p : Problem) (eqs : List (Int × Coeffs × Option Proof)) : Problem :=
  eqs.foldl (init := p) fun p ⟨const, coeffs, prf?⟩ => p.addEquality const coeffs prf?
/-- Representation of the data required to run Fourier-Motzkin elimination on a variable. -/
structure FourierMotzkinData where
  /-- Which variable is being eliminated. -/
  var : Nat
  /-- The "irrelevant" facts which do not involve the target variable. -/
  irrelevant : List Fact := []
  /--
  The facts which give a lower bound on the target variable,
  and the coefficient of the target variable in each.
  -/
  lowerBounds : List (Fact × Int) := []
  /--
  The facts which give an upper bound on the target variable,
  and the coefficient of the target variable in each.
  -/
  upperBounds : List (Fact × Int) := []
  /--
  Whether the elimination would be exact, because all of the lower bound coefficients are `±1`.
  -/
  lowerExact : Bool := true
  /--
  Whether the elimination would be exact, because all of the upper bound coefficients are `±1`.
  -/
  upperExact : Bool := true
  deriving Inhabited
/-- Render the elimination data for one variable as a bulleted summary.
(Pattern brackets restored: bound entries are `(Fact × Int)` pairs.) -/
instance : ToString FourierMotzkinData where
  toString d :=
    let irrelevant := d.irrelevant.map fun ⟨x, s, _⟩ => s!"{x} ∈ {s}"
    let lowerBounds := d.lowerBounds.map fun ⟨x, s, _, _⟩ => s!"{x} ∈ {s}"
    let upperBounds := d.upperBounds.map fun ⟨x, s, _, _⟩ => s!"{x} ∈ {s}"
    s!"Fourier-Motzkin elimination data for variable {d.var}\n"
      ++ s!"• irrelevant: {irrelevant}\n"
      ++ s!"• lowerBounds: {lowerBounds}\n"
      ++ s!"• upperBounds: {upperBounds}"
/-- Is a Fourier-Motzkin elimination empty (i.e. there are no relevant constraints). -/
def FourierMotzkinData.isEmpty (d : FourierMotzkinData) : Bool :=
  d.lowerBounds.isEmpty && d.upperBounds.isEmpty

/-- The number of new constraints that would be introduced by Fourier-Motzkin elimination. -/
def FourierMotzkinData.size (d : FourierMotzkinData) : Nat :=
  d.lowerBounds.length * d.upperBounds.length

/-- Is the Fourier-Motzkin elimination known to be exact? -/
def FourierMotzkinData.exact (d : FourierMotzkinData) : Bool := d.lowerExact || d.upperExact
/-- Prepare the Fourier-Motzkin elimination data for each variable. -/
-- TODO we could short-circuit here, if we find one with `size = 0`.
def fourierMotzkinData (p : Problem) : Array FourierMotzkinData := Id.run do
  let n := p.numVars
  let mut data : Array FourierMotzkinData :=
    (List.range p.numVars).foldl (fun a i => a.push { var := i }) #[]
  for (_, f@⟨xs, s, _⟩) in p.constraints.toList do -- We could make a forIn instance for HashMap
    for i in [0:n] do
      let x := Coeffs.get xs i
      data := data.modify i fun d =>
        if x = 0 then
          { d with irrelevant := f :: d.irrelevant }
        else Id.run do
          -- Normalize so the scaled constraint has a positive coefficient for `i`;
          -- then its bounds tell us whether `f` bounds variable `i` from below/above.
          let s' := s.scale x
          let mut d' := d
          if s'.lowerBound.isSome then
            d' := { d' with
              lowerBounds := (f, x) :: d'.lowerBounds
              lowerExact := d'.lowerExact && x.natAbs = 1 }
          if s'.upperBound.isSome then
            d' := { d' with
              upperBounds := (f, x) :: d'.upperBounds
              upperExact := d'.upperExact && x.natAbs = 1 }
          return d'
  return data
/--
Decides which variable to run Fourier-Motzkin elimination on, and returns the associated data.
We prefer eliminations which introduce no new inequalities, or otherwise exact eliminations,
and break ties by the number of new inequalities introduced.
-/
def fourierMotzkinSelect (data : Array FourierMotzkinData) : FourierMotzkinData := Id.run do
  let data := data.filter fun d => ¬ d.isEmpty
  -- NOTE(review): `data[0]!` assumes at least one non-empty entry survives the
  -- filter — presumably guaranteed by the caller; confirm.
  let mut bestIdx := 0
  let mut bestSize := data[0]!.size
  let mut bestExact := data[0]!.exact
  if bestSize = 0 then return data[0]!
  for i in [1:data.size] do
    let exact := data[i]!.exact
    let size := data[i]!.size
    if size = 0 || !bestExact && exact || size < bestSize then
      -- `size = 0` cannot be beaten; return immediately.
      if size = 0 then return data[i]!
      bestIdx := i
      bestExact := exact
      bestSize := size
  return data[bestIdx]!
/--
Run Fourier-Motzkin elimination on one variable.
-/
def fourierMotzkin (p : Problem) : Problem := Id.run do
  let data := p.fourierMotzkinData
  -- Now perform the elimination.
  let ⟨_, irrelevant, lower, upper, _, _⟩ := fourierMotzkinSelect data
  let mut r : Problem := { assumptions := p.assumptions, eliminations := p.eliminations }
  for f in irrelevant do
    r := r.insertConstraint f
  -- Combine every lower bound with every upper bound, cancelling the variable.
  for ⟨f, b⟩ in lower do
    for ⟨g, a⟩ in upper do
      r := r.addConstraint (Fact.combo a f (-b) g).tidy
  return r
mutual

/--
Run the `omega` algorithm (for now without dark and grey shadows!) on a problem.
-/
partial def runOmega (p : Problem) : OmegaM Problem := do
  trace[omega] "Running omega on:\n{p}"
  if p.possible then
    let p' ← p.solveEqualities
    elimination p'
  else
    return p

/-- As for `runOmega`, but assuming the first round of solving equalities has already happened. -/
partial def elimination (p : Problem) : OmegaM Problem :=
  if p.possible then
    if p.isEmpty then
      return p
    else do
      trace[omega] "Running Fourier-Motzkin elimination on:\n{p}"
      runOmega p.fourierMotzkin
  else
    return p

end
end Problem
end Lean.Elab.Tactic.Omega

View File

@@ -0,0 +1,542 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Lean.Elab.Tactic.Omega.Core
import Lean.Elab.Tactic.FalseOrByContra
import Lean.Meta.Tactic.Cases
import Lean.Elab.Tactic.Config
/-!
# Frontend to the `omega` tactic.
See `Lean.Elab.Tactic.Omega` for an overview of the tactic.
-/
open Lean Meta Omega
namespace Lean.Elab.Tactic.Omega
/--
Allow elaboration of `OmegaConfig` arguments to tactics.
-/
declare_config_elab elabOmegaConfig Lean.Meta.Omega.OmegaConfig
/--
A partially processed `omega` context.
We have:
* a `Problem` representing the integer linear constraints extracted so far, and their proofs
* the unprocessed `facts : List Expr` taken from the local context,
* the unprocessed `disjunctions : List Expr`,
  which will only be split one at a time if we can't otherwise find a contradiction.
We begin with `facts := ← getLocalHyps` and `problem := .trivial`,
and progressively process the facts.
As we process the facts, we may generate additional facts
(e.g. about coercions and integer divisions).
To avoid duplicates, we maintain a `HashSet` of previously processed facts.
-/
structure MetaProblem where
  /-- An integer linear arithmetic problem. -/
  problem : Problem := {}
  /-- Pending facts which have not been processed yet. -/
  facts : List Expr := []
  /--
  Pending disjunctions, which we will case split one at a time if we can't get a contradiction.
  -/
  disjunctions : List Expr := []
  /-- Facts which have already been processed; we keep these to avoid duplicates. -/
  processedFacts : HashSet Expr := ∅
/-- Construct the `rfl` proof that `lc.eval atoms = e`. -/
def mkEvalRflProof (e : Expr) (lc : LinearCombo) : OmegaM Expr := do
  mkEqReflWithExpectedType e (mkApp2 (.const ``LinearCombo.eval []) (toExpr lc) (← atomsCoeffs))
/-- If `e : Expr` is the `n`-th atom, construct the proof that
`e = (coordinate n).eval atoms`. -/
def mkCoordinateEvalAtomsEq (e : Expr) (n : Nat) : OmegaM Expr := do
  if n < 10 then
    -- For small `n`, use the pre-stated lemmas `LinearCombo.coordinate_eval_0` … `_9`.
    let atoms := (← getThe State).atoms
    let tail ← mkListLit (.const ``Int []) atoms[n+1:].toArray.toList
    let lem := .str ``LinearCombo s!"coordinate_eval_{n}"
    mkEqSymm (mkAppN (.const lem []) (atoms[:n+1].toArray.push tail))
  else
    let atoms ← atomsCoeffs
    let n := toExpr n
    -- Construct the `rfl` proof that `e = (atoms.get? n).getD 0`
    let eq ← mkEqReflWithExpectedType e (mkApp2 (.const ``Coeffs.get []) atoms n)
    mkEqTrans eq (← mkEqSymm (mkApp2 (.const ``LinearCombo.coordinate_eval []) n atoms))
/-- Construct the linear combination (and its associated proof and new facts) for an atom. -/
def mkAtomLinearCombo (e : Expr) : OmegaM (LinearCombo × OmegaM Expr × HashSet Expr) := do
  let (n, facts) ← lookup e
  -- `facts` is `none` if the atom was seen before; report no new facts in that case.
  return ⟨LinearCombo.coordinate n, mkCoordinateEvalAtomsEq e n, facts.getD ∅⟩
-- This has been PR'd as
-- https://github.com/leanprover/lean4/pull/2900
-- and can be removed when that is merged.
@[inherit_doc mkAppN]
local macro_rules
  | `(mkAppN $f #[$xs,*]) => (xs.getElems.foldlM (fun x e => `(Expr.app $x $e)) f : MacroM Term)
mutual

/--
Wrapper for `asLinearComboImpl`,
using a cache for previously visited expressions.
Gives a small (10%) speedup in testing.
I tried using a pointer based cache,
but there was never enough subexpression sharing to make it effective.
-/
partial def asLinearCombo (e : Expr) : OmegaM (LinearCombo × OmegaM Expr × HashSet Expr) := do
  let cache ← get
  match cache.find? e with
  | some (lc, prf) =>
    trace[omega] "Found in cache: {e}"
    -- Cached expressions produced their new facts on first visit; report none now.
    return (lc, prf, ∅)
  | none =>
    let r ← asLinearComboImpl e
    -- Store the proof continuation re-run against the cache as of now.
    modifyThe Cache fun cache => (cache.insert e (r.1, r.2.1.run' cache))
    pure r
/--
Translates an expression into a `LinearCombo`.
Also returns:
* a proof that this linear combo evaluated at the atoms is equal to the original expression
* a list of new facts which should be recorded:
  * for each new atom `a` of the form `((x : Nat) : Int)`, the fact that `0 ≤ a`
  * for each new atom `a` of the form `x / k`, for `k` a positive numeral, the facts that
    `k * a ≤ x < (k + 1) * a`
  * for each new atom of the form `((a - b : Nat) : Int)`, the fact:
    `b ≤ a ∧ ((a - b : Nat) : Int) = a - b ∨ a < b ∧ ((a - b : Nat) : Int) = 0`
We also transform the expression as we descend into it:
* pushing coercions: `↑(x + y)`, `↑(x * y)`, `↑(x / k)`, `↑(x % k)`, `↑k`
* unfolding `emod`: `x % k` → `x - x / k`
-/
partial def asLinearComboImpl (e : Expr) : OmegaM (LinearCombo × OmegaM Expr × HashSet Expr) := do
  trace[omega] "processing {e}"
  match e.int? with
  | some i =>
    -- Integer literals become a constant linear combo.
    let lc := { const := i }
    return ⟨lc, mkEvalRflProof e lc, ∅⟩
  | none =>
  if e.isFVar then
    -- Unfold `let`-bound free variables; otherwise the fvar is an atom.
    if let some v ← e.fvarId!.getValue? then
      rewrite e (← mkEqReflWithExpectedType e v)
    else
      mkAtomLinearCombo e
  else match e.getAppFnArgs with
  | (``HAdd.hAdd, #[_, _, _, _, e₁, e₂]) => do
    let (l₁, prf₁, facts₁) ← asLinearCombo e₁
    let (l₂, prf₂, facts₂) ← asLinearCombo e₂
    let prf : OmegaM Expr := do
      let add_eval :=
        mkApp3 (.const ``LinearCombo.add_eval []) (toExpr l₁) (toExpr l₂) (← atomsCoeffs)
      mkEqTrans
        (← mkAppM ``Int.add_congr #[← prf₁, ← prf₂])
        (← mkEqSymm add_eval)
    pure (l₁ + l₂, prf, facts₁.merge facts₂)
  | (``HSub.hSub, #[_, _, _, _, e₁, e₂]) => do
    let (l₁, prf₁, facts₁) ← asLinearCombo e₁
    let (l₂, prf₂, facts₂) ← asLinearCombo e₂
    let prf : OmegaM Expr := do
      let sub_eval :=
        mkApp3 (.const ``LinearCombo.sub_eval []) (toExpr l₁) (toExpr l₂) (← atomsCoeffs)
      mkEqTrans
        (← mkAppM ``Int.sub_congr #[← prf₁, ← prf₂])
        (← mkEqSymm sub_eval)
    pure (l₁ - l₂, prf, facts₁.merge facts₂)
  | (``Neg.neg, #[_, _, e']) => do
    let (l, prf, facts) ← asLinearCombo e'
    let prf' : OmegaM Expr := do
      let neg_eval := mkApp2 (.const ``LinearCombo.neg_eval []) (toExpr l) (← atomsCoeffs)
      mkEqTrans
        (← mkAppM ``Int.neg_congr #[← prf])
        (← mkEqSymm neg_eval)
    pure (-l, prf', facts)
  | (``HMul.hMul, #[_, _, _, _, x, y]) =>
    -- If we decide not to expand out the multiplication,
    -- we have to revert the `OmegaM` state so that any new facts about the factors
    -- can still be reported when they are visited elsewhere.
    let r? ← commitWhen do
      let (xl, xprf, xfacts) ← asLinearCombo x
      let (yl, yprf, yfacts) ← asLinearCombo y
      -- Multiplication stays linear only if one side has no variable part.
      if xl.coeffs.isZero ∨ yl.coeffs.isZero then
        let prf : OmegaM Expr := do
          let h ← mkDecideProof (mkApp2 (.const ``Or [])
            (.app (.const ``Coeffs.isZero []) (toExpr xl.coeffs))
            (.app (.const ``Coeffs.isZero []) (toExpr yl.coeffs)))
          let mul_eval :=
            mkApp4 (.const ``LinearCombo.mul_eval []) (toExpr xl) (toExpr yl) (← atomsCoeffs) h
          mkEqTrans
            (← mkAppM ``Int.mul_congr #[← xprf, ← yprf])
            (← mkEqSymm mul_eval)
        pure (some (LinearCombo.mul xl yl, prf, xfacts.merge yfacts), true)
      else
        pure (none, false)
    match r? with
    | some r => pure r
    | none => mkAtomLinearCombo e
  | (``HMod.hMod, #[_, _, _, _, n, k]) =>
    match natCast? k with
    | some _ => rewrite e (mkApp2 (.const ``Int.emod_def []) n k)
    | none => mkAtomLinearCombo e
  | (``HDiv.hDiv, #[_, _, _, _, x, z]) =>
    match intCast? z with
    | some 0 => rewrite e (mkApp (.const ``Int.ediv_zero []) x)
    | some i =>
      if i < 0 then
        rewrite e (mkApp2 (.const ``Int.ediv_neg []) x (toExpr (-i)))
      else
        mkAtomLinearCombo e
    | _ => mkAtomLinearCombo e
  | (``Min.min, #[_, _, a, b]) =>
    if (← cfg).splitMinMax then
      rewrite e (mkApp2 (.const ``Int.min_def []) a b)
    else
      mkAtomLinearCombo e
  | (``Max.max, #[_, _, a, b]) =>
    if (← cfg).splitMinMax then
      rewrite e (mkApp2 (.const ``Int.max_def []) a b)
    else
      mkAtomLinearCombo e
  | (``Nat.cast, #[.const ``Int [], i, n]) =>
    match n with
    | .fvar h =>
      if let some v ← h.getValue? then
        rewrite e (← mkEqReflWithExpectedType e
          (mkApp3 (.const ``Nat.cast [0]) (.const ``Int []) i v))
      else
        mkAtomLinearCombo e
    | _ => match n.getAppFnArgs with
      | (``Nat.succ, #[n]) => rewrite e (.app (.const ``Int.ofNat_succ []) n)
      | (``HAdd.hAdd, #[_, _, _, _, a, b]) => rewrite e (mkApp2 (.const ``Int.ofNat_add []) a b)
      | (``HMul.hMul, #[_, _, _, _, a, b]) => rewrite e (mkApp2 (.const ``Int.ofNat_mul []) a b)
      | (``HDiv.hDiv, #[_, _, _, _, a, b]) => rewrite e (mkApp2 (.const ``Int.ofNat_ediv []) a b)
      | (``OfNat.ofNat, #[_, n, _]) => rewrite e (.app (.const ``Int.natCast_ofNat []) n)
      | (``HMod.hMod, #[_, _, _, _, a, b]) => rewrite e (mkApp2 (.const ``Int.ofNat_emod []) a b)
      | (``HSub.hSub, #[_, _, _, _, mkAppN (.const ``HSub.hSub _) #[_, _, _, _, a, b], c]) =>
        rewrite e (mkApp3 (.const ``Int.ofNat_sub_sub []) a b c)
      | (``Prod.fst, #[_, β, p]) => match p with
        | .app (.app (.app (.app (.const ``Prod.mk [0, v]) _) _) x) y =>
          rewrite e (mkApp3 (.const ``Int.ofNat_fst_mk [v]) β x y)
        | _ => mkAtomLinearCombo e
      | (``Prod.snd, #[α, _, p]) => match p with
        | .app (.app (.app (.app (.const ``Prod.mk [u, 0]) _) _) x) y =>
          rewrite e (mkApp3 (.const ``Int.ofNat_snd_mk [u]) α x y)
        | _ => mkAtomLinearCombo e
      | (``Min.min, #[_, _, a, b]) => rewrite e (mkApp2 (.const ``Int.ofNat_min []) a b)
      | (``Max.max, #[_, _, a, b]) => rewrite e (mkApp2 (.const ``Int.ofNat_max []) a b)
      | (``Int.natAbs, #[n]) =>
        if (← cfg).splitNatAbs then
          rewrite e (mkApp (.const ``Int.ofNat_natAbs []) n)
        else
          mkAtomLinearCombo e
      | _ => mkAtomLinearCombo e
  | (``Prod.fst, #[α, β, p]) => match p with
    | .app (.app (.app (.app (.const ``Prod.mk [u, v]) _) _) x) y =>
      rewrite e (mkApp4 (.const ``Prod.fst_mk [u, v]) α x β y)
    | _ => mkAtomLinearCombo e
  | (``Prod.snd, #[α, β, p]) => match p with
    | .app (.app (.app (.app (.const ``Prod.mk [u, v]) _) _) x) y =>
      rewrite e (mkApp4 (.const ``Prod.snd_mk [u, v]) α x β y)
    | _ => mkAtomLinearCombo e
  | _ => mkAtomLinearCombo e
where
  /--
  Apply a rewrite rule to an expression, and interpret the result as a `LinearCombo`.
  (We're not rewriting any subexpressions here, just the top level, for efficiency.)
  -/
  rewrite (lhs rw : Expr) : OmegaM (LinearCombo × OmegaM Expr × HashSet Expr) := do
    trace[omega] "rewriting {lhs} via {rw} : {← inferType rw}"
    match (← inferType rw).eq? with
    | some (_, _lhs', rhs) =>
      let (lc, prf, facts) ← asLinearCombo rhs
      let prf' : OmegaM Expr := do mkEqTrans rw (← prf)
      pure (lc, prf', facts)
    | none => panic! "Invalid rewrite rule in 'asLinearCombo'"
end
namespace MetaProblem

/-- The trivial `MetaProblem`, with no facts to process and a trivial `Problem`. -/
def trivial : MetaProblem where
  problem := {}

instance : Inhabited MetaProblem := ⟨trivial⟩
/--
Add an integer equality to the `Problem`.
We solve equalities as they are discovered, as this often results in an earlier contradiction.
-/
def addIntEquality (p : MetaProblem) (h x : Expr) : OmegaM MetaProblem := do
  let (lc, prf, facts) ← asLinearCombo x
  -- Only queue facts we have not already processed.
  let newFacts : HashSet Expr := facts.fold (init := ∅) fun s e =>
    if p.processedFacts.contains e then s else s.insert e
  trace[omega] "Adding proof of {lc} = 0"
  pure <|
    { p with
      facts := newFacts.toList ++ p.facts
      problem := ← (p.problem.addEquality lc.const lc.coeffs
        (some do mkEqTrans (← mkEqSymm (← prf)) h)) |>.solveEqualities }
/--
Add an integer inequality to the `Problem`.
We solve equalities as they are discovered, as this often results in an earlier contradiction.
-/
def addIntInequality (p : MetaProblem) (h y : Expr) : OmegaM MetaProblem := do
  let (lc, prf, facts) ← asLinearCombo y
  -- Only queue facts we have not already processed.
  let newFacts : HashSet Expr := facts.fold (init := ∅) fun s e =>
    if p.processedFacts.contains e then s else s.insert e
  trace[omega] "Adding proof of {lc} ≥ 0"
  pure <|
    { p with
      facts := newFacts.toList ++ p.facts
      problem := ← (p.problem.addInequality lc.const lc.coeffs
        (some do mkAppM ``le_of_le_of_eq #[h, (← prf)])) |>.solveEqualities }
/-- Given a fact `h` with type `¬ P`, return a more useful fact obtained by pushing the negation. -/
def pushNot (h P : Expr) : MetaM (Option Expr) := do
  let P ← whnfR P
  match P with
  | .forallE _ t b _ =>
    -- `¬ (t → b)` for propositions becomes `t ∧ ¬ b` (classically).
    if (← isProp t) && (← isProp b) then
      return some (mkApp4 (.const ``Decidable.and_not_of_not_imp []) t b
        (.app (.const ``Classical.propDecidable []) t) h)
    else
      return none
  | .app _ _ =>
    match P.getAppFnArgs with
    | (``LT.lt, #[.const ``Int [], _, x, y]) =>
      return some (mkApp3 (.const ``Int.le_of_not_lt []) x y h)
    | (``LE.le, #[.const ``Int [], _, x, y]) =>
      return some (mkApp3 (.const ``Int.lt_of_not_le []) x y h)
    | (``LT.lt, #[.const ``Nat [], _, x, y]) =>
      return some (mkApp3 (.const ``Nat.le_of_not_lt []) x y h)
    | (``LE.le, #[.const ``Nat [], _, x, y]) =>
      return some (mkApp3 (.const ``Nat.lt_of_not_le []) x y h)
    | (``Eq, #[.const ``Nat [], x, y]) =>
      return some (mkApp3 (.const ``Nat.lt_or_gt_of_ne []) x y h)
    | (``Eq, #[.const ``Int [], x, y]) =>
      return some (mkApp3 (.const ``Int.lt_or_gt_of_ne []) x y h)
    | (``Prod.Lex, _) => return some (← mkAppM ``Prod.of_not_lex #[h])
    | (``Dvd.dvd, #[.const ``Nat [], _, k, x]) =>
      return some (mkApp3 (.const ``Nat.emod_pos_of_not_dvd []) k x h)
    | (``Dvd.dvd, #[.const ``Int [], _, k, x]) =>
      -- This introduces a disjunction that could be avoided by checking `k ≠ 0`.
      return some (mkApp3 (.const ``Int.emod_pos_of_not_dvd []) k x h)
    | (``Or, #[P₁, P₂]) => return some (mkApp3 (.const ``and_not_not_of_not_or []) P₁ P₂ h)
    | (``And, #[P₁, P₂]) =>
      return some (mkApp5 (.const ``Decidable.or_not_not_of_not_and []) P₁ P₂
        (.app (.const ``Classical.propDecidable []) P₁)
        (.app (.const ``Classical.propDecidable []) P₂) h)
    | (``Not, #[P']) =>
      return some (mkApp3 (.const ``Decidable.of_not_not []) P'
        (.app (.const ``Classical.propDecidable []) P') h)
    | (``Iff, #[P₁, P₂]) =>
      return some (mkApp5 (.const ``Decidable.and_not_or_not_and_of_not_iff []) P₁ P₂
        (.app (.const ``Classical.propDecidable []) P₁)
        (.app (.const ``Classical.propDecidable []) P₂) h)
    | _ => return none
  | _ => return none
/--
Parse an `Expr` and extract facts, also returning the number of new facts found.
-/
partial def addFact (p : MetaProblem) (h : Expr) : OmegaM (MetaProblem × Nat) := do
  if ! p.problem.possible then
    -- The problem is already known inconsistent; no need to collect more facts.
    return (p, 0)
  else
    -- Normalise the type of the hypothesis before matching on its head symbol.
    let t ← instantiateMVars (← whnfR (← inferType h))
    trace[omega] "adding fact: {t}"
    match t with
    | .forallE _ x y _ =>
      -- An implication between propositions is turned into a (classical) disjunction.
      if (← isProp x) && (← isProp y) then
        p.addFact (mkApp4 (.const ``Decidable.not_or_of_imp []) x y
          (.app (.const ``Classical.propDecidable []) x) h)
      else
        return (p, 0)
    | .app _ _ =>
      match t.getAppFnArgs with
      | (``Eq, #[.const ``Int [], x, y]) =>
        match y.int? with
        | some 0 => pure (← p.addIntEquality h x, 1)
        | _ => p.addFact (mkApp3 (.const ``Int.sub_eq_zero_of_eq []) x y h)
      | (``LE.le, #[.const ``Int [], _, x, y]) =>
        match x.int? with
        | some 0 => pure (← p.addIntInequality h y, 1)
        | _ => p.addFact (mkApp3 (.const ``Int.sub_nonneg_of_le []) y x h)
      | (``LT.lt, #[.const ``Int [], _, x, y]) =>
        p.addFact (mkApp3 (.const ``Int.add_one_le_of_lt []) x y h)
      | (``GT.gt, #[.const ``Int [], _, x, y]) =>
        p.addFact (mkApp3 (.const ``Int.lt_of_gt []) x y h)
      | (``GE.ge, #[.const ``Int [], _, x, y]) =>
        p.addFact (mkApp3 (.const ``Int.le_of_ge []) x y h)
      | (``GT.gt, #[.const ``Nat [], _, x, y]) =>
        p.addFact (mkApp3 (.const ``Nat.lt_of_gt []) x y h)
      | (``GE.ge, #[.const ``Nat [], _, x, y]) =>
        p.addFact (mkApp3 (.const ``Nat.le_of_ge []) x y h)
      | (``Ne, #[.const ``Nat [], x, y]) =>
        p.addFact (mkApp3 (.const ``Nat.lt_or_gt_of_ne []) x y h)
      | (``Not, #[P]) => match ← pushNot h P with
        | none => return (p, 0)
        | some h' => p.addFact h'
      | (``Eq, #[.const ``Nat [], x, y]) =>
        p.addFact (mkApp3 (.const ``Int.ofNat_congr []) x y h)
      | (``LT.lt, #[.const ``Nat [], _, x, y]) =>
        p.addFact (mkApp3 (.const ``Int.ofNat_lt_of_lt []) x y h)
      | (``LE.le, #[.const ``Nat [], _, x, y]) =>
        p.addFact (mkApp3 (.const ``Int.ofNat_le_of_le []) x y h)
      | (``Ne, #[.const ``Int [], x, y]) =>
        p.addFact (mkApp3 (.const ``Int.lt_or_gt_of_ne []) x y h)
      | (``Prod.Lex, _) => p.addFact (← mkAppM ``Prod.of_lex #[h])
      | (``Dvd.dvd, #[.const ``Nat [], _, k, x]) =>
        p.addFact (mkApp3 (.const ``Nat.mod_eq_zero_of_dvd []) k x h)
      | (``Dvd.dvd, #[.const ``Int [], _, k, x]) =>
        p.addFact (mkApp3 (.const ``Int.emod_eq_zero_of_dvd []) k x h)
      | (``And, #[t₁, t₂]) => do
        -- A conjunction contributes both components as separate facts.
        let (p₁, n₁) ← p.addFact (mkApp3 (.const ``And.left []) t₁ t₂ h)
        let (p₂, n₂) ← p₁.addFact (mkApp3 (.const ``And.right []) t₁ t₂ h)
        return (p₂, n₁ + n₂)
      | (``Exists, #[α, P]) =>
        p.addFact (mkApp3 (.const ``Exists.choose_spec [← getLevel α]) α P h)
      | (``Subtype, #[α, P]) =>
        p.addFact (mkApp3 (.const ``Subtype.property [← getLevel α]) α P h)
      | (``Iff, #[P₁, P₂]) =>
        p.addFact (mkApp4 (.const ``Decidable.and_or_not_and_not_of_iff [])
          P₁ P₂ (.app (.const ``Classical.propDecidable []) P₂) h)
      | (``Or, #[_, _]) =>
        -- Disjunctions are deferred: they are stored and split later, if enabled.
        if (← cfg).splitDisjunctions then
          return ({ p with disjunctions := p.disjunctions.insert h }, 1)
        else
          return (p, 0)
      | _ => pure (p, 0)
    | _ => pure (p, 0)
/--
Process all the facts in a `MetaProblem`, returning the new problem, and the number of new facts.

This is partial because new facts may be generated along the way.
-/
partial def processFacts (p : MetaProblem) : OmegaM (MetaProblem × Nat) := do
  match p.facts with
  | [] => pure (p, 0)
  | h :: t =>
    if p.processedFacts.contains h then
      -- Already seen this fact: drop it and continue with the rest.
      processFacts { p with facts := t }
    else
      -- Record `h` as processed *before* adding it, so recursive additions terminate.
      let (p₁, n₁) ← MetaProblem.addFact { p with
        facts := t
        processedFacts := p.processedFacts.insert h } h
      let (p₂, n₂) ← p₁.processFacts
      return (p₂, n₁ + n₂)
end MetaProblem
/--
Given `p : P ∨ Q` (or any inductive type with two one-argument constructors),
split the goal into two subgoals:
one containing the hypothesis `h : P` and another containing `h : Q`.
-/
def cases₂ (mvarId : MVarId) (p : Expr) (hName : Name := `h) :
    MetaM ((MVarId × FVarId) × (MVarId × FVarId)) := do
  -- Introduce `p` as a hypothesis so `cases` can destruct it.
  let mvarId ← mvarId.assert `hByCases (← inferType p) p
  let (fvarId, mvarId) ← mvarId.intro1
  let #[s₁, s₂] ← mvarId.cases fvarId #[{ varNames := [hName] }, { varNames := [hName] }] |
    throwError "'cases' tactic failed, unexpected number of subgoals"
  let #[Expr.fvar f₁ ..] ← pure s₁.fields
    | throwError "'cases' tactic failed, unexpected new hypothesis"
  let #[Expr.fvar f₂ ..] ← pure s₂.fields
    | throwError "'cases' tactic failed, unexpected new hypothesis"
  return ((s₁.mvarId, f₁), (s₂.mvarId, f₂))
mutual

/--
Split a disjunction in a `MetaProblem`, and if we find a new usable fact
call `omegaImpl` in both branches.
-/
partial def splitDisjunction (m : MetaProblem) (g : MVarId) : OmegaM Unit := g.withContext do
  match m.disjunctions with
  | [] => throwError "omega did not find a contradiction:\n{m.problem}"
  | h :: t =>
    trace[omega] "Case splitting on {← inferType h}"
    -- Save the metavariable context so we can roll back if this split is useless.
    let ctx ← getMCtx
    let (⟨g₁, h₁⟩, ⟨g₂, h₂⟩) ← cases₂ g h
    trace[omega] "Adding facts:\n{← g₁.withContext <| inferType (.fvar h₁)}"
    let m₁ := { m with facts := [.fvar h₁], disjunctions := t }
    -- Speculatively try the first branch; only commit if it produced a new fact.
    let r ← withoutModifyingState do
      let (m₁, n) ← g₁.withContext m₁.processFacts
      if 0 < n then
        omegaImpl m₁ g₁
        pure true
      else
        pure false
    if r then
      trace[omega] "Adding facts:\n{← g₂.withContext <| inferType (.fvar h₂)}"
      let m₂ := { m with facts := [.fvar h₂], disjunctions := t }
      omegaImpl m₂ g₂
    else
      trace[omega] "No new facts found."
      -- Undo the case split and try the next stored disjunction instead.
      setMCtx ctx
      splitDisjunction { m with disjunctions := t } g

/-- Implementation of the `omega` algorithm, and handling disjunctions. -/
partial def omegaImpl (m : MetaProblem) (g : MVarId) : OmegaM Unit := g.withContext do
  let (m, _) ← m.processFacts
  guard m.facts.isEmpty
  let p := m.problem
  trace[omega] "Extracted linear arithmetic problem:\nAtoms: {← atomsList}\n{p}"
  let p' ← if p.possible then p.elimination else pure p
  trace[omega] "After elimination:\nAtoms: {← atomsList}\n{p'}"
  match p'.possible, p'.proveFalse?, p'.proveFalse?_spec with
  | true, _, _ =>
    -- Elimination did not close the problem: fall back to case splitting.
    splitDisjunction m g
  | false, .some prf, _ =>
    trace[omega] "Justification:\n{p'.explanation?.get}"
    let prf ← instantiateMVars (← prf)
    trace[omega] "omega found a contradiction, proving {← inferType prf}"
    trace[omega] "{prf}"
    g.assign prf

end
/--
Given a collection of facts, try prove `False` using the omega algorithm,
and close the goal using that.
-/
def omega (facts : List Expr) (g : MVarId) (cfg : OmegaConfig := {}) : MetaM Unit :=
  OmegaM.run (omegaImpl { facts } g) cfg
open Lean Elab Tactic Parser.Tactic
/-- The `omega` tactic, for resolving integer and natural linear arithmetic problems. -/
def omegaTactic (cfg : OmegaConfig) : TacticM Unit := do
  liftMetaFinishingTactic fun g => do
    let g ← g.falseOrByContra
      (useClassical := false) -- because all the hypotheses we can make use of are decidable
    g.withContext do
      let hyps := (← getLocalHyps).toList
      trace[omega] "analyzing {hyps.length} hypotheses:\n{← hyps.mapM inferType}"
      omega hyps g cfg
/-- The `omega` tactic, for resolving integer and natural linear arithmetic problems. This
`TacticM Unit` frontend with default configuration can be used as an Aesop rule, for example via
the tactic call `aesop (add 50% tactic Std.Tactic.Omega.omegaDefault)`. -/
def omegaDefault : TacticM Unit := omegaTactic {} -- `{}` is the default `OmegaConfig`.
/-- Elaborator for the `omega` tactic syntax: parse the optional config and run the tactic. -/
@[builtin_tactic Lean.Parser.Tactic.omega]
def evalOmega : Tactic := fun
  | `(tactic| omega $[$cfg]?) => do
    let cfg ← elabOmegaConfig (mkOptionalNode cfg)
    omegaTactic cfg
  | _ => throwUnsupportedSyntax

View File

@@ -0,0 +1,139 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
/-!
# `List.nonzeroMinimum`, `List.minNatAbs`, `List.maxNatAbs`
`List.minNatAbs` computes the minimum non-zero absolute value of a `List Int`.
This is not generally useful outside of the implementation of the `omega` tactic,
so we keep it in the `Lean/Elab/Tactic/Omega` directory
(although the definitions are in the `List` namespace).
-/
namespace List
/--
The minimum non-zero entry in a list of natural numbers, or zero if all entries are zero.

We completely characterize the function via
`nonzeroMinimum_eq_zero_iff` and `nonzeroMinimum_eq_nonzero_iff` below.
-/
def nonzeroMinimum (xs : List Nat) : Nat := xs.filter (· ≠ 0) |>.minimum? |>.getD 0
-- A specialization of `minimum?_eq_some_iff` to Nat.
theorem minimum?_eq_some_iff' {xs : List Nat} :
    xs.minimum? = some a ↔ (a ∈ xs ∧ ∀ b ∈ xs, a ≤ b) :=
  minimum?_eq_some_iff
    (le_refl := Nat.le_refl)
    (min_eq_or := fun _ _ => Nat.min_def .. ▸ by split <;> simp)
    (le_min_iff := fun _ _ _ => Nat.le_min)
open Classical in
@[simp] theorem nonzeroMinimum_eq_zero_iff {xs : List Nat} :
    xs.nonzeroMinimum = 0 ↔ ∀ x ∈ xs, x = 0 := by
  simp [nonzeroMinimum, Option.getD_eq_iff, minimum?_eq_none_iff, minimum?_eq_some_iff',
    filter_eq_nil, mem_filter]
theorem nonzeroMinimum_mem {xs : List Nat} (w : xs.nonzeroMinimum ≠ 0) :
    xs.nonzeroMinimum ∈ xs := by
  dsimp [nonzeroMinimum] at *
  generalize h : (xs.filter (· ≠ 0) |>.minimum?) = m at *
  match m, w with
  | some (m+1), _ => simp_all [minimum?_eq_some_iff', mem_filter]
theorem nonzeroMinimum_pos {xs : List Nat} (m : a ∈ xs) (h : a ≠ 0) : 0 < xs.nonzeroMinimum :=
  Nat.pos_iff_ne_zero.mpr fun w => h (nonzeroMinimum_eq_zero_iff.mp w _ m)
theorem nonzeroMinimum_le {xs : List Nat} (m : a ∈ xs) (h : a ≠ 0) : xs.nonzeroMinimum ≤ a := by
  have : (xs.filter (· ≠ 0) |>.minimum?) = some xs.nonzeroMinimum := by
    have w := nonzeroMinimum_pos m h
    dsimp [nonzeroMinimum] at *
    generalize h : (xs.filter (· ≠ 0) |>.minimum?) = m? at *
    match m?, w with
    | some m?, _ => rfl
  rw [minimum?_eq_some_iff'] at this
  apply this.2
  simp [List.mem_filter]
  exact ⟨m, h⟩
theorem nonzeroMinimum_eq_nonzero_iff {xs : List Nat} {y : Nat} (h : y ≠ 0) :
    xs.nonzeroMinimum = y ↔ y ∈ xs ∧ (∀ x ∈ xs, y ≤ x ∨ x = 0) := by
  constructor
  · rintro rfl
    constructor
    exact nonzeroMinimum_mem h
    intro y m
    by_cases w : y = 0
    · right; exact w
    · left; apply nonzeroMinimum_le m w
  · rintro ⟨m, w⟩
    apply Nat.le_antisymm
    · exact nonzeroMinimum_le m h
    · have nz : xs.nonzeroMinimum ≠ 0 := by
        apply Nat.pos_iff_ne_zero.mp
        apply nonzeroMinimum_pos m h
      specialize w (nonzeroMinimum xs) (nonzeroMinimum_mem nz)
      cases w with
      | inl h => exact h
      | inr h => exact False.elim (nz h)
theorem nonzeroMinimum_eq_of_nonzero {xs : List Nat} (h : xs.nonzeroMinimum ≠ 0) :
    ∃ x ∈ xs, xs.nonzeroMinimum = x :=
  ⟨xs.nonzeroMinimum, ((nonzeroMinimum_eq_nonzero_iff h).mp rfl).1, rfl⟩
theorem nonzeroMinimum_le_iff {xs : List Nat} {y : Nat} :
    xs.nonzeroMinimum ≤ y ↔ xs.nonzeroMinimum = 0 ∨ ∃ x ∈ xs, x ≤ y ∧ x ≠ 0 := by
  refine ⟨fun h => ?_, fun h => ?_⟩
  · rw [Classical.or_iff_not_imp_right]
    simp only [ne_eq, not_exists, not_and, Classical.not_not, nonzeroMinimum_eq_zero_iff]
    intro w
    apply nonzeroMinimum_eq_zero_iff.mp
    if p : xs.nonzeroMinimum = 0 then
      exact p
    else
      exact w _ (nonzeroMinimum_mem p) h
  · match h with
    | .inl h => simp [h]
    | .inr ⟨x, m, le, ne⟩ => exact Nat.le_trans (nonzeroMinimum_le m ne) le
theorem nonzeroMininum_map_le_nonzeroMinimum (f : α → β) (p : α → Nat) (q : β → Nat) (xs : List α)
    (h : ∀ a, a ∈ xs → (p a = 0 ↔ q (f a) = 0))
    (w : ∀ a, a ∈ xs → p a ≠ 0 → q (f a) ≤ p a) :
    ((xs.map f).map q).nonzeroMinimum ≤ (xs.map p).nonzeroMinimum := by
  rw [nonzeroMinimum_le_iff]
  if z : (xs.map p).nonzeroMinimum = 0 then
    rw [nonzeroMinimum_eq_zero_iff]
    simp_all
  else
    have := nonzeroMinimum_eq_of_nonzero z
    simp only [mem_map] at this
    obtain ⟨x, ⟨a, m, rfl⟩, eq⟩ := this
    refine .inr ⟨q (f a), List.mem_map_of_mem _ (List.mem_map_of_mem _ m), ?_, ?_⟩
    · rw [eq] at z ⊢
      apply w _ m z
    · rwa [Ne, ← h _ m, ← eq]
/--
The minimum absolute value of a nonzero entry, or zero if all entries are zero.

We completely characterize the function via
`minNatAbs_eq_zero_iff` and `minNatAbs_eq_nonzero_iff` below.
-/
def minNatAbs (xs : List Int) : Nat := xs.map Int.natAbs |>.nonzeroMinimum
@[simp] theorem minNatAbs_eq_zero_iff {xs : List Int} : xs.minNatAbs = 0 ↔ ∀ y ∈ xs, y = 0 := by
  simp [minNatAbs]
theorem minNatAbs_eq_nonzero_iff (xs : List Int) (w : z ≠ 0) :
    xs.minNatAbs = z ↔ (∃ y ∈ xs, y.natAbs = z) ∧ (∀ y ∈ xs, z ≤ y.natAbs ∨ y = 0) := by
  simp [minNatAbs, nonzeroMinimum_eq_nonzero_iff w]
/-- `minNatAbs` of the empty list is `0`, definitionally. -/
@[simp] theorem minNatAbs_nil : ([] : List Int).minNatAbs = 0 := rfl
/-- The maximum absolute value in a list of integers. -/
def maxNatAbs (xs : List Int) : Nat := xs.map Int.natAbs |>.maximum? |>.getD 0

View File

@@ -0,0 +1,211 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import Lean.Meta.AppBuilder
/-!
# The `OmegaM` state monad.
We keep track of the linear atoms (up to defeq) that have been encountered so far,
and also generate new facts as new atoms are recorded.
The main functions are:
* `atoms : OmegaM (List Expr)` which returns the atoms recorded so far
* `lookup (e : Expr) : OmegaM (Nat × Option (HashSet Expr))` which checks if an `Expr` has
already been recorded as an atom, and records it.
`lookup` returns the index in `atoms` for this `Expr`.
The `Option (HashSet Expr)` return value is `none` if the expression has been previously
recorded, and otherwise contains new facts that should be added to the `omega` problem.
* for each new atom `a` of the form `((x : Nat) : Int)`, the fact that `0 ≤ a`
* for each new atom `a` of the form `x / k`, for `k` a positive numeral, the facts that
`k * a ≤ x < k * a + k`
* for each new atom of the form `((a - b : Nat) : Int)`, the fact:
`b ≤ a ∧ ((a - b : Nat) : Int) = a - b ∨ a < b ∧ ((a - b : Nat) : Int) = 0`
* for each new atom of the form `min a b`, the facts `min a b ≤ a` and `min a b ≤ b`
(and similarly for `max`)
* for each new atom of the form `if P then a else b`, the disjunction:
`(P ∧ (if P then a else b) = a) ∨ (¬ P ∧ (if P then a else b) = b)`
The `OmegaM` monad also keeps an internal cache of visited expressions
(not necessarily atoms, but arbitrary subexpressions of one side of a linear relation)
to reduce duplication.
The cache maps `Expr`s to pairs consisting of a `LinearCombo`,
and proof that the expression is equal to the evaluation of the `LinearCombo` at the atoms.
-/
open Lean Meta Omega
namespace Lean.Elab.Tactic.Omega
/-- Context for the `OmegaM` monad, containing the user configurable options. -/
structure Context where
  /-- User configurable options for `omega`. -/
  cfg : OmegaConfig
/-- The internal state for the `OmegaM` monad, recording previously encountered atoms. -/
structure State where
  /-- The atoms up-to-defeq encountered so far. -/
  atoms : Array Expr := #[]
/-- An intermediate layer in the `OmegaM` monad. -/
abbrev OmegaM' := StateRefT State (ReaderT Context MetaM)

/--
Cache of expressions that have been visited, and their reflection as a linear combination.
-/
def Cache : Type := HashMap Expr (LinearCombo × OmegaM' Expr)

/--
The `OmegaM` monad maintains two pieces of state:
* the linear atoms discovered while processing hypotheses
* a cache mapping subexpressions of one side of a linear inequality to `LinearCombo`s
  (and a proof that the `LinearCombo` evaluates at the atoms to the original expression). -/
abbrev OmegaM := StateRefT Cache OmegaM'

/-- Run a computation in the `OmegaM` monad, starting with no recorded atoms. -/
def OmegaM.run (m : OmegaM α) (cfg : OmegaConfig) : MetaM α :=
  m.run' HashMap.empty |>.run' {} { cfg }
/-- Retrieve the user-specified configuration options. -/
def cfg : OmegaM OmegaConfig := do pure (← read).cfg

/-- Retrieve the list of atoms. -/
def atoms : OmegaM (List Expr) := return (← getThe State).atoms.toList

/-- Return the `Expr` representing the list of atoms. -/
def atomsList : OmegaM Expr := do mkListLit (.const ``Int []) (← atoms)

/-- Return the `Expr` representing the list of atoms as a `Coeffs`. -/
def atomsCoeffs : OmegaM Expr := do
  return .app (.const ``Coeffs.ofList []) (← atomsList)
/-- Run an `OmegaM` computation, restoring the state afterwards depending on the result. -/
def commitWhen (t : OmegaM (α × Bool)) : OmegaM α := do
  -- Snapshot both layers of state so they can be rolled back.
  let state ← getThe State
  let cache ← getThe Cache
  let (a, r) ← t
  if !r then do
    modifyThe State fun _ => state
    modifyThe Cache fun _ => cache
  pure a
/--
Run an `OmegaM` computation, restoring the state afterwards.
-/
def withoutModifyingState (t : OmegaM α) : OmegaM α :=
  commitWhen (do pure (← t, false))
/-- Wrapper around `Expr.nat?` that also allows `Nat.cast`. -/
def natCast? (n : Expr) : Option Nat :=
  match n.getAppFnArgs with
  | (``Nat.cast, #[_, _, n]) => n.nat?
  | _ => n.nat?
/-- Wrapper around `Expr.int?` that also allows `Nat.cast`. -/
def intCast? (n : Expr) : Option Int :=
  match n.getAppFnArgs with
  | (``Nat.cast, #[_, _, n]) => n.nat?
  | _ => n.int?
/-- Construct the term with type hint `(Eq.refl a : a = b)`-/
def mkEqReflWithExpectedType (a b : Expr) : MetaM Expr := do
  mkExpectedTypeHint (← mkEqRefl a) (← mkEq a b)
/--
Analyzes a newly recorded atom,
returning a collection of interesting facts about it that should be added to the context.
-/
def analyzeAtom (e : Expr) : OmegaM (HashSet Expr) := do
  match e.getAppFnArgs with
  | (``Nat.cast, #[.const ``Int [], _, e']) =>
    -- Casts of natural numbers are non-negative.
    let mut r := HashSet.empty.insert (Expr.app (.const ``Int.ofNat_nonneg []) e')
    match (← cfg).splitNatSub, e'.getAppFnArgs with
    | true, (``HSub.hSub, #[_, _, _, _, a, b]) =>
      -- `((a - b : Nat) : Int)` gives a dichotomy
      r := r.insert (mkApp2 (.const ``Int.ofNat_sub_dichotomy []) a b)
    | _, (``Int.natAbs, #[x]) =>
      r := r.insert (mkApp (.const ``Int.le_natAbs []) x)
      r := r.insert (mkApp (.const ``Int.neg_le_natAbs []) x)
    | _, (``Fin.val, #[n, i]) =>
      r := r.insert (mkApp2 (.const ``Fin.isLt []) n i)
    | _, _ => pure ()
    return r
  | (``HDiv.hDiv, #[_, _, _, _, x, k]) => match natCast? k with
    | none
    | some 0 => pure ∅
    | some _ =>
      -- `k * x/k ≤ x < k * x/k + k`
      let ne_zero := mkApp3 (.const ``Ne [1]) (.const ``Int []) k (toExpr (0 : Int))
      let pos := mkApp4 (.const ``LT.lt [0]) (.const ``Int []) (.const ``Int.instLTInt [])
        (toExpr (0 : Int)) k
      pure <| HashSet.empty.insert
        (mkApp3 (.const ``Int.mul_ediv_self_le []) x k (← mkDecideProof ne_zero)) |>.insert
        (mkApp3 (.const ``Int.lt_mul_ediv_self_add []) x k (← mkDecideProof pos))
  | (``HMod.hMod, #[_, _, _, _, x, k]) =>
    -- For `x % k` with `k` a (cast of a) positive power, record `0 ≤ x % k < k`.
    match k.getAppFnArgs with
    | (``HPow.hPow, #[_, _, _, _, b, exp]) => match natCast? b with
      | none
      | some 0 => pure ∅
      | some _ =>
        let b_pos := mkApp4 (.const ``LT.lt [0]) (.const ``Int []) (.const ``Int.instLTInt [])
          (toExpr (0 : Int)) b
        let pow_pos := mkApp3 (.const ``Int.pos_pow_of_pos []) b exp (← mkDecideProof b_pos)
        pure <| HashSet.empty.insert
          (mkApp3 (.const ``Int.emod_nonneg []) x k
            (mkApp3 (.const ``Int.ne_of_gt []) k (toExpr (0 : Int)) pow_pos)) |>.insert
          (mkApp3 (.const ``Int.emod_lt_of_pos []) x k pow_pos)
    | (``Nat.cast, #[.const ``Int [], _, k']) =>
      match k'.getAppFnArgs with
      | (``HPow.hPow, #[_, _, _, _, b, exp]) => match natCast? b with
        | none
        | some 0 => pure ∅
        | some _ =>
          let b_pos := mkApp4 (.const ``LT.lt [0]) (.const ``Nat []) (.const ``instLTNat [])
            (toExpr (0 : Nat)) b
          let pow_pos := mkApp3 (.const ``Nat.pos_pow_of_pos []) b exp (← mkDecideProof b_pos)
          let cast_pos := mkApp2 (.const ``Int.ofNat_pos_of_pos []) k' pow_pos
          pure <| HashSet.empty.insert
            (mkApp3 (.const ``Int.emod_nonneg []) x k
              (mkApp3 (.const ``Int.ne_of_gt []) k (toExpr (0 : Int)) cast_pos)) |>.insert
            (mkApp3 (.const ``Int.emod_lt_of_pos []) x k cast_pos)
      | _ => pure ∅
    | _ => pure ∅
  | (``Min.min, #[_, _, x, y]) =>
    pure <| HashSet.empty.insert (mkApp2 (.const ``Int.min_le_left []) x y) |>.insert
      (mkApp2 (.const ``Int.min_le_right []) x y)
  | (``Max.max, #[_, _, x, y]) =>
    pure <| HashSet.empty.insert (mkApp2 (.const ``Int.le_max_left []) x y) |>.insert
      (mkApp2 (.const ``Int.le_max_right []) x y)
  | (``ite, #[α, i, dec, t, e]) =>
    if α == (.const ``Int []) then
      pure <| HashSet.empty.insert <| mkApp5 (.const ``ite_disjunction [0]) α i dec t e
    else
      pure {}
  | _ => pure ∅
/--
Look up an expression in the atoms, recording it if it has not previously appeared.

Return its index, and, if it is new, a collection of interesting facts about the atom.
* for each new atom `a` of the form `((x : Nat) : Int)`, the fact that `0 ≤ a`
* for each new atom `a` of the form `x / k`, for `k` a positive numeral, the facts that
  `k * a ≤ x < k * a + k`
* for each new atom of the form `((a - b : Nat) : Int)`, the fact:
  `b ≤ a ∧ ((a - b : Nat) : Int) = a - b ∨ a < b ∧ ((a - b : Nat) : Int) = 0`
-/
def lookup (e : Expr) : OmegaM (Nat × Option (HashSet Expr)) := do
  let c ← getThe State
  -- Linear scan up-to-defeq: atom lists are expected to stay small.
  for h : i in [:c.atoms.size] do
    if ← isDefEq e c.atoms[i] then
      return (i, none)
  trace[omega] "New atom: {e}"
  let facts ← analyzeAtom e
  if ← isTracingEnabledFor `omega then
    unless facts.isEmpty do
      trace[omega] "New facts: {← facts.toList.mapM fun e => inferType e}"
  let i ← modifyGetThe State fun c => (c.atoms.size, { c with atoms := c.atoms.push e })
  return (i, some facts)
end Omega

View File

@@ -501,6 +501,13 @@ export MonadLCtx (getLCtx)
instance [MonadLift m n] [MonadLCtx m] : MonadLCtx n where
getLCtx := liftM (getLCtx : m _)
/-- Return local hypotheses which are not "implementation detail", as `Expr`s. -/
def getLocalHyps [Monad m] [MonadLCtx m] : m (Array Expr) := do
  let mut hs := #[]
  for d in ← getLCtx do
    if !d.isImplementationDetail then hs := hs.push d.toExpr
  return hs
def LocalDecl.replaceFVarId (fvarId : FVarId) (e : Expr) (d : LocalDecl) : LocalDecl :=
if d.fvarId == fvarId then d
else match d with

View File

@@ -27,9 +27,6 @@ instance decidableBall (l : List α) : Decidable (∀ x, x ∈ l → p x) :=
end decidability_instances
/-- Insert `a` at the front of `l`, unless it is already a member. -/
@[inline] protected def List.insert {α : Type} [DecidableEq α] (a : α) (l : List α) : List α :=
  if a ∈ l then l else a :: l
-- Fixture built with the custom `List.insert` above, which prepends an element only
-- when the membership test `a ∈ l` fails, so duplicate inserts are collapsed.
def parts : List (List Nat) := List.insert ([1, 1, 0, 0]) <| List.insert ([0, 0, 1, 1]) <|
List.insert ([1, 0, 0, 1]) <| List.insert ([1, 1, 1, 0]) <| List.insert ([1, 0, 0, 0]) <|
List.insert [1, 2, 3, 4] <| List.insert [5, 6, 7, 8] []

View File

@@ -1,12 +1,12 @@
def List.insert (p : α α Bool) (a : α) (bs : List α) : List α :=
def List.insert' (p : α α Bool) (a : α) (bs : List α) : List α :=
match bs with
| [] => [a]
| b :: bs' => if p a b then a :: bs else b :: bs'.insert p a
| b :: bs' => if p a b then a :: bs else b :: bs'.insert' p a
def List.merge (p : α α Bool) (as bs : List α) : List α :=
match as with
| [] => bs
| a :: as' => insert p a (merge p as' bs)
| a :: as' => insert' p a (merge p as' bs)
def List.split (as : List α) : List α × List α :=
match as with

382
tests/lean/run/omega.lean Normal file
View File

@@ -0,0 +1,382 @@
example : True := by
fail_if_success omega
trivial
-- set_option trace.omega true
example (_ : (1 : Int) < (0 : Int)) : False := by omega
example (_ : (0 : Int) < (0 : Int)) : False := by omega
example (_ : (0 : Int) < (1 : Int)) : True := by (fail_if_success omega); trivial
example {x : Int} (_ : 0 x) (_ : x 1) : True := by (fail_if_success omega); trivial
example {x : Int} (_ : 0 x) (_ : x -1) : False := by omega
example {x : Int} (_ : x % 2 < x - 2 * (x / 2)) : False := by omega
example {x : Int} (_ : x % 2 > 5) : False := by omega
example {x : Int} (_ : 2 * (x / 2) > x) : False := by omega
example {x : Int} (_ : 2 * (x / 2) x - 2) : False := by omega
example {x : Nat} : x / 0 = 0 := by omega
example {x : Int} : x / 0 = 0 := by omega
example {x : Int} : x / 2 + x / (-2) = 0 := by omega
example {x : Nat} (_ : x 0) : 0 < x := by omega
example {x y z : Nat} (_ : a c) (_ : b c) : a < Nat.succ c := by omega
example (_ : 7 < 3) : False := by omega
example (_ : 0 < 0) : False := by omega
example {x : Nat} (_ : x > 7) (_ : x < 3) : False := by omega
example {x : Nat} (_ : x 7) (_ : x 3) : False := by omega
example {x y : Nat} (_ : x + y > 10) (_ : x < 5) (_ : y < 5) : False := by omega
example {x y : Int} (_ : x + y > 10) (_ : 2 * x < 11) (_ : y < 5) : False := by omega
example {x y : Nat} (_ : x + y > 10) (_ : 2 * x < 11) (_ : y < 5) : False := by omega
example {x y : Int} (_ : 2 * x + 4 * y = 5) : False := by omega
example {x y : Nat} (_ : 2 * x + 4 * y = 5) : False := by omega
example {x y : Int} (_ : 6 * x + 7 * y = 5) : True := by (fail_if_success omega); trivial
example {x y : Nat} (_ : 6 * x + 7 * y = 5) : False := by omega
example {x y : Nat} (_ : x * 6 + y * 7 = 5) : False := by omega
example {x y : Nat} (_ : 2 * (3 * x) + y * 7 = 5) : False := by omega
example {x y : Nat} (_ : 2 * x * 3 + y * 7 = 5) : False := by omega
example {x y : Nat} (_ : 2 * 3 * x + y * 7 = 5) : False := by omega
example {x : Nat} (_ : x < 0) : False := by omega
example {x y z : Int} (_ : x + y > z) (_ : x < 0) (_ : y < 0) (_ : z > 0) : False := by omega
example {x y : Nat} (_ : x - y = 0) (_ : x > y) : False := by
fail_if_success omega (config := { splitNatSub := false })
omega
example {x y z : Int} (_ : x - y - z = 0) (_ : x > y + z) : False := by omega
example {x y z : Nat} (_ : x - y - z = 0) (_ : x > y + z) : False := by omega
example {a b c d e f : Nat} (_ : a - b - c - d - e - f = 0) (_ : a > b + c + d + e + f) :
False := by
omega
example {x y : Nat} (h₁ : x - y 0) (h₂ : y < x) : False := by omega
example {x y : Int} (_ : x / 2 - y / 3 < 1) (_ : 3 * x 2 * y + 6) : False := by omega
example {x y : Nat} (_ : x / 2 - y / 3 < 1) (_ : 3 * x 2 * y + 6) : False := by omega
example {x y : Nat} (_ : x / 2 - y / 3 < 1) (_ : 3 * x 2 * y + 4) : False := by omega
example {x y : Nat} (_ : x / 2 - y / 3 < x % 2) (_ : 3 * x 2 * y + 4) : False := by omega
example {x : Int} (h₁ : 5 x) (h₂ : x 4) : False := by omega
example {x : Nat} (h₁ : 5 x) (h₂ : x 4) : False := by omega
example {x : Nat} (h₁ : x / 3 2) (h₂ : x < 6) : False := by omega
example {x : Int} {y : Nat} (_ : 0 < x) (_ : x + y 0) : False := by omega
example {a b c : Nat} (_ : a - (b - c) 5) (_ : b c + 3) (_ : a + c b + 6) : False := by omega
example {x : Nat} : 1 < (1 + ((x + 1 : Nat) : Int) + 2) / 2 := by omega
example {x : Nat} : (x + 4) / 2 x + 2 := by omega
example {x : Int} {m : Nat} (_ : 0 < m) (_ : ¬x % m < (m + 1) / 2) : -m / 2 x % m - m := by
omega
example (h : (7 : Int) = 0) : False := by omega
example (h : (7 : Int) 0) : False := by omega
example (h : (-7 : Int) + 14 = 0) : False := by omega
example (h : (-7 : Int) + 14 0) : False := by omega
example (h : (1 : Int) + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 = 0) : False := by
omega
example (h : (7 : Int) - 14 = 0) : False := by omega
example (h : (14 : Int) - 7 0) : False := by omega
example (h : (1 : Int) - 1 + 1 - 1 + 1 - 1 + 1 - 1 + 1 - 1 + 1 - 1 + 1 - 1 + 1 = 0) : False := by
omega
example (h : -(7 : Int) = 0) : False := by omega
example (h : -(-7 : Int) 0) : False := by omega
example (h : 2 * (7 : Int) = 0) : False := by omega
example (h : (7 : Int) < 0) : False := by omega
example {x : Int} (h : x + x + 1 = 0) : False := by omega
example {x : Int} (h : 2 * x + 1 = 0) : False := by omega
example {x y : Int} (h : x + x + y + y + 1 = 0) : False := by omega
example {x y : Int} (h : 2 * x + 2 * y + 1 = 0) : False := by omega
example {x : Int} (h₁ : 0 -7 + x) (h₂ : 0 3 - x) : False := by omega
example {x : Int} (h₁ : 0 -7 + x) (h₂ : 0 < 4 - x) : False := by omega
example {x : Int} (h₁ : 0 2 * x + 1) (h₂ : 2 * x + 1 0) : False := by omega
example {x : Int} (h₁ : 0 < 2 * x + 2) (h₂ : 2 * x + 1 0) : False := by omega
example {x y : Int} (h₁ : 0 2 * x + 1) (h₂ : x = y) (h₃ : 2 * y + 1 0) : False := by omega
example {x y z : Int} (h₁ : 0 2 * x + 1) (h₂ : x = y) (h₃ : y = z) (h₄ : 2 * z + 1 0) :
False := by omega
example {x1 x2 x3 x4 x5 x6 : Int} (h : 0 2 * x1 + 1) (h : x1 = x2) (h : x2 = x3) (h : x3 = x4)
(h : x4 = x5) (h : x5 = x6) (h : 2 * x6 + 1 0) : False := by omega
example {x : Int} (_ : 1 -3 * x) (_ : 1 2 * x) : False := by omega
example {x y : Int} (_ : 2 * x + 3 * y = 0) (_ : 1 x) (_ : 1 y) : False := by omega
example {x y z : Int} (_ : 2 * x + 3 * y = 0) (_ : 3 * y + 4 * z = 0) (_ : 1 x) (_ : 1 -z) :
False := by omega
example {x y z : Int} (_ : 2 * x + 3 * y + 4 * z = 0) (_ : 1 x + y) (_ : 1 y + z)
(_ : 1 x + z) : False := by omega
example {x y : Int} (_ : 1 3 * x) (_ : y 2) (_ : 6 * x - 2 y) : False := by omega
example {x y : Int} (_ : y = x) (_ : 0 x - 2 * y) (_ : x - 2 * y 1) (_ : 1 x) : False := by
omega
example {x y : Int} (_ : y = x) (_ : 0 x - 2 * y) (_ : x - 2 * y 1) (_ : x 1) : False := by
omega
example {x y : Int} (_ : y = x) (_ : 0 x - 2 * y) (_ : x - 2 * y 1) (_ : 0 < x) : False := by
omega
example {x y : Int} (_ : y = x) (_ : 0 x - 2 * y) (_ : x - 2 * y 1) (_ : x > 0) : False := by
omega
example {x : Nat} (_ : 10 x) (_ : ¬ 5 x) : False := by omega
example {x y : Nat} (_ : 5 x) (_ : ¬ 10 x) (_ : y = 7) (_ : x - y 2) (_ : x 6) : False := by
omega
example (x : Nat) : x % 4 - x % 8 = 0 := by omega
example {n : Nat} (_ : n > 0) : (2*n - 1) % 2 = 1 := by omega
example (x : Int) (_ : x > 0 x < -1) : False := by omega
example (x : Int) (_ : x > 7) : x < 0 x > 3 := by omega
example (_ : n : Nat, n < 0) : False := by omega
example (_ : { x : Int // x < 0 x > 0 }) : False := by omega
example {x y : Int} (_ : x < y) (z : { z : Int // y z z x }) : False := by omega
example (a b c d e : Int)
(ha : 2 * a + b + c + d + e = 4)
(hb : a + 2 * b + c + d + e = 5)
(hc : a + b + 2 * c + d + e = 6)
(hd : a + b + c + 2 * d + e = 7)
(he : a + b + c + d + 2 * e = 8) : e = 3 := by omega
example (a b c d e : Int)
(ha : 2 * a + b + c + d + e = 4)
(hb : a + 2 * b + c + d + e = 5)
(hc : a + b + 2 * c + d + e = 6)
(hd : a + b + c + 2 * d + e = 7)
(he : a + b + c + d + 2 * e = 8 e = 3) : e = 3 := by
fail_if_success omega (config := { splitDisjunctions := false })
omega
example {x : Int} (h : x = 7) : x.natAbs = 7 := by
fail_if_success omega (config := { splitNatAbs := false })
fail_if_success omega (config := { splitDisjunctions := false })
omega
example {x y : Int} (_ : (x - y).natAbs < 3) (_ : x < 5) (_ : y > 15) : False := by
omega
example {a b : Int} (h : a < b) (w : b < a) : False := by omega
example (_e b c a v0 v1 : Int) (_h1 : v0 = 5 * a) (_h2 : v1 = 3 * b) (h3 : v0 + v1 + c = 10) :
v0 + 5 + (v1 - 3) + (c - 2) = 10 := by omega
example (h : (1 : Int) < 0) (_ : ¬ (37 : Int) < 42) (_ : True) (_ : (-7 : Int) < 5) :
(3 : Int) < 7 := by omega
example (A B : Int) (h : 0 < A * B) : 0 < 8 * (A * B) := by omega
example (A B : Nat) (h : 7 < A * B) : 0 < A*B/8 := by omega
example (A B : Int) (h : 7 < A * B) : 0 < A*B/8 := by omega
example (ε : Int) (h1 : ε > 0) : ε / 2 + ε / 3 + ε / 7 < ε := by omega
example (x y z : Int) (h1 : 2*x < 3*y) (h2 : -4*x + z/2 < 0) (h3 : 12*y - z < 0) : False := by omega
example (ε : Int) (h1 : ε > 0) : ε / 2 < ε := by omega
example (ε : Int) (_ : ε > 0) : ε - 2 ε / 3 + ε / 3 + ε / 3 := by omega
example (ε : Int) (_ : ε > 0) : ε / 3 + ε / 3 + ε / 3 ε := by omega
example (ε : Int) (_ : ε > 0) : ε - 2 ε / 3 + ε / 3 + ε / 3 ε / 3 + ε / 3 + ε / 3 ε := by
omega
example (x : Int) (h : 0 < x) : 0 < x / 1 := by omega
example (x : Int) (h : 5 < x) : 0 < x/2/3 := by omega
example (_a b _c : Nat) (h2 : b + 2 > 3 + b) : False := by omega
example (_a b _c : Int) (h2 : b + 2 > 3 + b) : False := by omega
example (g v V c h : Int) (_ : h = 0) (_ : v = V) (_ : V > 0) (_ : g > 0)
(_ : 0 c) (_ : c < 1) : v V := by omega
example (x y z : Int) (h1 : 2 * x < 3 * y) (h2 : -4 * x + 2 * z < 0) (h3 : 12 * y - 4 * z < 0) :
False := by
omega
example (x y z : Int) (h1 : 2 * x < 3 * y) (h2 : -4 * x + 2 * z < 0) (_h3 : x * y < 5)
(h3 : 12 * y - 4 * z < 0) : False := by omega
example (a b c : Int) (h1 : a > 0) (h2 : b > 5) (h3 : c < -10) (h4 : a + b - c < 3) : False := by
omega
example (_ b _ : Int) (h2 : b > 0) (h3 : ¬ b 0) : False := by
omega
example (x y z : Int) (hx : x 3 * y) (h2 : y 2 * z) (h3 : x 6 * z) : x = 3 * y := by
omega
example (x y z : Int) (h1 : 2 * x < 3 * y) (h2 : -4 * x + 2 * z < 0) (_h3 : x * y < 5) :
¬ 12 * y - 4 * z < 0 := by
omega
example (x y z : Int) (hx : ¬ x > 3 * y) (h2 : ¬ y > 2 * z) (h3 : x 6 * z) : x = 3 * y := by
omega
example (x y : Int) (h : 6 + ((x + 4) * x + (6 + 3 * y) * y) = 3) (h' : (x + 4) * x 0)
(h'' : (6 + 3 * y) * y 0) : False := by omega
example (a : Int) (ha : 0 a) : 0 * 0 2 * a := by omega
example (x y : Int) (h : x < y) : x y := by omega
example (x y : Int) (h : x < y) : ¬ x = y := by omega
example (x : Int) : id x x := by omega
example (prime : Nat Prop) (x y z : Int) (h1 : 2 * x + ((-3) * y) < 0) (h2 : (-4) * x + 2* z < 0)
(h3 : 12 * y + (-4) * z < 0) (_ : prime 7) : False := by omega
example (i n : Nat) (h : (2 : Int) ^ i 2 ^ n) : (0 : Int) 2 ^ n - 2 ^ i := by omega
-- Check we use `exfalso` on non-comparison goals.
example (prime : Nat Prop) (_ b _ : Nat) (h2 : b > 0) (h3 : b < 0) : prime 10 := by
omega
example (a b c : Nat) (h2 : (2 : Nat) > 3) : a + b - c 3 := by omega
-- Verify that we split conjunctions in hypotheses.
example (x y : Int)
(h : 6 + ((x + 4) * x + (6 + 3 * y) * y) = 3 (x + 4) * x 0 (6 + 3 * y) * y 0) :
False := by omega
example (mess : Nat Nat) (S n : Nat) :
mess S + (n * mess S + n * 2 + 1) < n * mess S + mess S + (n * 2 + 2) := by omega
example (p n p' n' : Nat) (h : p + n' = p' + n) : n + p' = n' + p := by
omega
example (a b c : Int) (h1 : 32 / a < b) (h2 : b < c) : 32 / a < c := by omega
-- Check that `autoParam` wrappers do not get in the way of using hypotheses.
example (i n : Nat) (hi : i n := by omega) : i < n + 1 := by
omega
-- Test that we consume expression metadata when necessary.
example : 0 = 0 := by
have : 0 = 0 := by omega
omega -- This used to fail.
/-! ### `Prod.Lex` -/
-- This example comes from the termination proof
-- for `permutationsAux.rec` in `Mathlib.Data.List.Defs`.
example {x y : Nat} : Prod.Lex (· < ·) (· < ·) (x, x) (Nat.succ y + x, Nat.succ y) := by omega
-- We test the termination proof in-situ:
-- well-founded recursion on the lexicographic measure `(length ts + length is, length ts)`,
-- with the `decreasing_by` obligations discharged by `omega` via its `Prod.Lex` support.
def List.permutationsAux.rec' {C : List α → List α → Sort v} (H0 : ∀ is, C [] is)
    (H1 : ∀ t ts is, C ts (t :: is) → C is [] → C (t :: ts) is) : ∀ l₁ l₂, C l₁ l₂
  | [], is => H0 is
  | t :: ts, is =>
    H1 t ts is (permutationsAux.rec' H0 H1 ts (t :: is)) (permutationsAux.rec' H0 H1 is [])
termination_by ts is => (length ts + length is, length ts)
decreasing_by all_goals simp_wf; omega
-- `Prod.Lex` in a hypothesis as well as in the goal.
example {x y w z : Nat} (h : Prod.Lex (· < ·) (· < ·) (x + 1, y + 1) (w, z)) :
    Prod.Lex (· < ·) (· < ·) (x, y) (w, z) := by omega
-- Verify that we can handle `iff` statements in hypotheses:
example (a b : Int) (h : a < 0 ↔ b < 0) (w : b > 3) : a ≥ 0 := by omega
-- Verify that we can prove `iff` goals:
example (a b : Int) (h : a > 7) (w : b > 2) : a > 0 ↔ b > 0 := by omega
-- Verify that we can prove implications:
example (a : Int) : a > 0 → a > -1 := by omega
-- Verify that we can introduce multiple arguments:
example (x y : Int) : x + 1 ≤ y → ¬ y + 1 ≤ x := by omega
-- Verify that we can handle double negation:
example (x y : Int) (_ : x < y) (_ : ¬ ¬ y < x) : False := by omega
-- Verify that we don't treat function goals as implications.
example (a : Nat) (h : a < 0) : Nat → Nat := by omega
-- Example from Cedar:
example {a₁ a₂ p₁ p₂ : Nat}
    (h₁ : a₁ = a₂ → ¬p₁ = p₂) :
    (a₁ < a₂ ∨ a₁ = a₂ ∧ p₁ < p₂) ∨ a₂ < a₁ ∨ a₂ = a₁ ∧ p₂ < p₁ := by omega
-- From https://github.com/leanprover/std4/issues/562
example {i : Nat} (h1 : i < 330) (_h2 : 7 ∣ (660 + i) * (1319 - i)) : 1319 - i < 1979 := by
  omega
-- `min`/`max` facts that follow from the defining inequalities alone,
-- so they succeed even with `splitMinMax := false`.
example {a : Int} (_ : a < min a b) : False := by omega (config := { splitMinMax := false })
example {a : Int} (_ : max a b < b) : False := by omega (config := { splitMinMax := false })
example {a : Nat} (_ : a < min a b) : False := by omega (config := { splitMinMax := false })
example {a : Nat} (_ : max a b < b) : False := by omega (config := { splitMinMax := false })
-- These require case splitting on `min`/`max`, so they must fail with `splitMinMax := false`
-- and succeed with the default configuration.
example {a b : Nat} (_ : a = 7) (_ : b = 3) : min a b = 3 := by
  fail_if_success omega (config := { splitMinMax := false })
  omega
example {a b : Nat} (_ : a + b = 9) : (min a b) % 2 + (max a b) % 2 = 1 := by
  fail_if_success omega (config := { splitMinMax := false })
  omega
-- `if`-`then`-`else` with a decidable linear condition is split automatically.
example {a : Int} (_ : a < if a ≤ b then a else b) : False := by omega
example {a b : Int} : (if a < b then a else b - 1) ≤ b := by omega
-- Check that we use local values.
example (i j : Nat) (p : i ≥ j) : True := by
  let l := j - 1
  have _ : i ≥ l := by omega
  trivial
-- Local values should be followed through chains of `let`s.
example (i j : Nat) (p : i ≥ j) : True := by
  let l := j - 1
  let k := l
  have _ : i ≥ k := by omega
  trivial
-- `Fin` coercions: `(i : Nat) < 7 ≤ 8` from `i : Fin 7`.
example (i : Fin 7) : (i : Nat) < 8 := by omega
-- `%` by a power of two: each remainder is at most `2 ^ i - 1`.
example (x y z i : Nat) (hz : z ≤ 1) : x % 2 ^ i + y % 2 ^ i + z < 2 * 2 ^ i := by omega