Compare commits

..

1 Commits

Author SHA1 Message Date
Kim Morrison
28cd14e261 chore: List/Array/Vector implicitness changes 2025-03-31 09:32:47 +11:00
800 changed files with 4340 additions and 19862 deletions

View File

@@ -1,4 +1,3 @@
# instantiated by ci.yml
name: build-template
on:
workflow_call:
@@ -46,7 +45,7 @@ jobs:
CCACHE_DIR: ${{ github.workspace }}/.ccache
CCACHE_COMPRESS: true
# current cache limit
CCACHE_MAXSIZE: 600M
CCACHE_MAXSIZE: 200M
# squelch error message about missing nixpkgs channel
NIX_BUILD_SHELL: bash
LSAN_OPTIONS: max_leaks=10
@@ -98,22 +97,32 @@ jobs:
sudo apt-get install -y gcc-multilib g++-multilib ccache libuv1-dev:i386 pkgconf:i386
if: matrix.cmultilib
- name: Cache
id: restore-cache
if: matrix.name != 'Linux Lake'
uses: actions/cache/restore@v4
uses: actions/cache@v4
with:
# NOTE: must be in sync with `save` below
path: |
.ccache
${{ matrix.name == 'Linux Lake' && 'build/stage1/**/*.trace
build/stage1/**/*.olean
build/stage1/**/*.ilean
build/stage1/**/*.c
build/stage1/**/*.c.o*' || '' }}
key: ${{ matrix.name }}-build-v3-${{ github.event.pull_request.head.sha }}
# fall back to (latest) previous cache
restore-keys: |
${{ matrix.name }}-build-v3
save-always: true
- name: Cache
if: matrix.name == 'Linux Lake'
uses: actions/cache@v4
with:
path: |
.ccache
build/stage1/**/*.trace
build/stage1/**/*.olean
build/stage1/**/*.ilean
build/stage1/**/*.c
build/stage1/**/*.c.o*
key: ${{ matrix.name }}-build-v3-${{ github.event.pull_request.head.sha }}
# fall back to (latest) previous cache
restore-keys: |
${{ matrix.name }}-build-v3
save-always: true
# open nix-shell once for initial setup
- name: Setup
run: |
@@ -193,7 +202,7 @@ jobs:
id: test
run: |
ulimit -c unlimited # coredumps
time ctest --preset ${{ matrix.CMAKE_PRESET || 'release' }} --test-dir build/stage1 -j$NPROC --output-junit test-results.xml ${{ matrix.CTEST_OPTIONS }} --timeout 200
time ctest --preset ${{ matrix.CMAKE_PRESET || 'release' }} --test-dir build/stage1 -j$NPROC --output-junit test-results.xml ${{ matrix.CTEST_OPTIONS }}
if: (matrix.wasm || !matrix.cross) && inputs.check-level >= 1
- name: Test Summary
uses: test-summary/action@v2
@@ -227,7 +236,6 @@ jobs:
make -C build update-stage0 && rm -rf build/stage* && make -C build -j$NPROC
if: matrix.name == 'Linux' && inputs.check-level >= 1
- name: CCache stats
if: always()
run: ccache -s
- name: Show stacktrace for coredumps
if: failure() && runner.os == 'Linux'
@@ -235,17 +243,4 @@ jobs:
for c in $(find . -name core); do
progbin="$(file $c | sed "s/.*execfn: '\([^']*\)'.*/\1/")"
echo bt | $GDB/bin/gdb -q $progbin $c || true
done
- name: Save Cache
if: always() && steps.restore-cache.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
# NOTE: must be in sync with `restore` above
path: |
.ccache
${{ matrix.name == 'Linux Lake' && 'build/stage1/**/*.trace
build/stage1/**/*.olean
build/stage1/**/*.ilean
build/stage1/**/*.c
build/stage1/**/*.c.o*' || '' }}
key: ${{ steps.restore-cache.outputs.cache-primary-key }}
done

View File

@@ -166,8 +166,6 @@ jobs:
// foreign code may be linked against more recent glibc
"CTEST_OPTIONS": "-E 'foreign'"
},
// deactivated due to bugs
/*
{
"name": "Linux Lake",
"os": large ? "nscloud-ubuntu-22.04-amd64-4x8" : "ubuntu-latest",
@@ -178,7 +176,6 @@ jobs:
// TODO: why does this fail?
"CTEST_OPTIONS": "-E 'scopedMacros'"
},
*/
{
"name": "Linux",
"os": large ? "nscloud-ubuntu-22.04-amd64-4x8" : "ubuntu-latest",

1
.gitignore vendored
View File

@@ -22,7 +22,6 @@ settings.json
.gdb_history
.vscode/*
!.vscode/settings.json
script/__pycache__
*.produced.out
CMakeSettings.json
CppProperties.json

View File

@@ -1,7 +1,4 @@
cmake_minimum_required(VERSION 3.11)
option(USE_MIMALLOC "use mimalloc" ON)
# store all variables passed on the command line into CL_ARGS so we can pass them to the stage builds
# https://stackoverflow.com/a/48555098/161659
# MUST be done before call to 'project'
@@ -17,12 +14,10 @@ foreach(var ${vars})
if("${var}" MATCHES "USE_GMP|CHECK_OLEAN_VERSION")
# must forward options that generate incompatible .olean format
list(APPEND STAGE0_ARGS "-D${var}=${${var}}")
elseif("${var}" MATCHES "LLVM*|PKG_CONFIG|USE_LAKE|USE_MIMALLOC")
endif()
if("${var}" MATCHES "LLVM*|PKG_CONFIG|USE_LAKE")
list(APPEND STAGE0_ARGS "-D${var}=${${var}}")
endif()
elseif("${var}" MATCHES "USE_MIMALLOC")
list(APPEND CL_ARGS "-D${var}=${${var}}")
list(APPEND STAGE0_ARGS "-D${var}=${${var}}")
elseif(("${var}" MATCHES "CMAKE_.*") AND NOT ("${var}" MATCHES "CMAKE_BUILD_TYPE") AND NOT ("${var}" MATCHES "CMAKE_HOME_DIRECTORY"))
list(APPEND PLATFORM_ARGS "-D${var}=${${var}}")
endif()
@@ -60,23 +55,11 @@ if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Emscripten")
BUILD_IN_SOURCE ON
INSTALL_COMMAND "")
set(CADICAL ${CMAKE_BINARY_DIR}/cadical/cadical${CMAKE_EXECUTABLE_SUFFIX} CACHE FILEPATH "path to cadical binary" FORCE)
list(APPEND EXTRA_DEPENDS cadical)
set(EXTRA_DEPENDS "cadical")
endif()
list(APPEND CL_ARGS -DCADICAL=${CADICAL})
endif()
if (USE_MIMALLOC)
ExternalProject_add(mimalloc
PREFIX mimalloc
GIT_REPOSITORY https://github.com/microsoft/mimalloc
GIT_TAG v2.2.3
# just download, we compile it as part of each stage as it is small
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND "")
list(APPEND EXTRA_DEPENDS mimalloc)
endif()
ExternalProject_add(stage0
SOURCE_DIR "${LEAN_SOURCE_DIR}/stage0"
SOURCE_SUBDIR src

View File

@@ -30,7 +30,6 @@
"LEANC_EXTRA_CC_FLAGS": "-fsanitize=address,undefined",
"LEAN_EXTRA_LINKER_FLAGS": "-fsanitize=address,undefined -fsanitize-link-c++-runtime",
"SMALL_ALLOCATOR": "OFF",
"USE_MIMALLOC": "OFF",
"BSYMBOLIC": "OFF",
"LEAN_TEST_VARS": "MAIN_STACK_SIZE=16000"
},

View File

@@ -5,7 +5,7 @@ See below for the checklist for release candidates.
We'll use `v4.6.0` as the intended release version as a running example.
- Run `script/release_checklist.py v4.6.0` to check the status of the release.
- Run `scripts/release_checklist.py v4.6.0` to check the status of the release.
This script is purely informational, idempotent, and safe to run at any stage of the release process.
- `git checkout releases/v4.6.0`
(This branch should already exist, from the release candidates.)
@@ -13,8 +13,7 @@ We'll use `v4.6.0` as the intended release version as a running example.
- In `src/CMakeLists.txt`, verify you see
- `set(LEAN_VERSION_MINOR 6)` (for whichever `6` is appropriate)
- `set(LEAN_VERSION_IS_RELEASE 1)`
- (all of these should already be in place from the release candidates)
- (both of these should already be in place from the release candidates)
- `git tag v4.6.0`
- `git push $REMOTE v4.6.0`, where `$REMOTE` is the upstream Lean repository (e.g., `origin`, `upstream`)
- Now wait, while CI runs.
@@ -42,10 +41,6 @@ We'll use `v4.6.0` as the intended release version as a running example.
- In order to have the access rights to push to these repositories and merge PRs,
you will need to be a member of the `lean-release-managers` team at both `leanprover-community` and `leanprover`.
Contact Kim Morrison (@kim-em) to arrange access.
- There is an experimental script that will guide you through the steps for each of the repositories below.
The script should be invoked as
`script/release_steps.py vx.y.x <repo>` where `<repo>` is a case-insensitive substring of the repo name.
For example: `script/release_steps.py v4.6.0 batt` will guide you through the steps for the Batteries repository.
- For each of the repositories listed below:
- Make a PR to `master`/`main` changing the toolchain to `v4.6.0`
- The usual branch name would be `bump_to_v4.6.0`.
@@ -85,11 +80,6 @@ We'll use `v4.6.0` as the intended release version as a running example.
- Toolchain bump PR including updated Lake manifest
- Create and push the tag
- There is no `stable` branch; skip this step
- [Reference Manual](https://github.com/leanprover/reference-manual)
- Dependencies: Verso
- Toolchain bump PR including updated Lake manifest
- Pushing the tag (whether for an RC or a final) triggers a deployment.
- There is no `stable` branch; skip this step
- [Cli](https://github.com/leanprover/lean4-cli)
- No dependencies
- Toolchain bump PR
@@ -133,10 +123,6 @@ We'll use `v4.6.0` as the intended release version as a running example.
- Toolchain bump PR including updated Lake manifest
- Create and push the tag
- Merge the tag into `stable`
- An awkward situtation that sometimes occurs (e.g. with Verso) is that one of these upstream dependencies has
already moved its `master`/`main` branch to a nightly toolchain that comes *after* the stable toolchain we are
targeting. In this case it is necessary to create a branch `releases/v4.6.0` from the last commit which was on
an earlier toolchain, move that branch to the stable toolchain, and create the toolchain tag from that branch.
- Run `script/release_checklist.py v4.6.0` again to check that everything is in order.
- Finally, make an announcement!
This should go in https://leanprover.zulipchat.com/#narrow/stream/113486-announce, with topic `v4.6.0`.
@@ -174,7 +160,6 @@ We'll use `v4.7.0-rc1` as the intended release version in this example.
git fetch nightly tag nightly-2024-02-29
git checkout nightly-2024-02-29
git checkout -b releases/v4.7.0
git push --set-upstream origin releases/v4.18.0
```
- In `RELEASES.md` replace `Development in progress` in the `v4.7.0` section with `Release notes to be written.`
- In `src/CMakeLists.txt`,
@@ -184,7 +169,6 @@ We'll use `v4.7.0-rc1` as the intended release version in this example.
- `git tag v4.7.0-rc1`
- `git push origin v4.7.0-rc1`
- Now wait, while CI runs.
- The CI setup parses the tag to discover the `-rc1` special description, and passes it to `cmake` using a `-D` option. The `-rc1` doesn't need to be placed in the configuration file.
- You can monitor this at `https://github.com/leanprover/lean4/actions/workflows/ci.yml`, looking for the `v4.7.0-rc1` tag.
- This step can take up to an hour.
- (GitHub release notes) Once the release appears at https://github.com/leanprover/lean4/releases/
@@ -228,8 +212,20 @@ We'll use `v4.7.0-rc1` as the intended release version in this example.
You will want a few bullet points for main topics from the release notes.
Please also make sure that whoever is handling social media knows the release is out.
- Begin the next development cycle (i.e. for `v4.8.0`) on the Lean repository, by making a PR that:
- Uses branch name `dev_cycle_v4.8`.
- Updates `src/CMakeLists.txt` to say `set(LEAN_VERSION_MINOR 8)`
- Replaces the "release notes will be copied" text in the `v4.6.0` section of `RELEASES.md` with the
finalized release notes from the `releases/v4.6.0` branch.
- Replaces the "development in progress" in the `v4.7.0` section of `RELEASES.md` with
```
Release candidate, release notes will be copied from the branch `releases/v4.7.0` once completed.
```
and inserts the following section before that section:
```
v4.8.0
----------
Development in progress.
```
- Removes all the entries from the `./releases_drafts/` folder.
- Titled "chore: begin development cycle for v4.8.0"

View File

@@ -17,7 +17,7 @@ lib.warn "The Nix-based build is deprecated" rec {
'';
} // args // {
src = args.realSrc or (sourceByRegex args.src [ "[a-z].*" "CMakeLists\.txt" ]);
cmakeFlags = ["-DSMALL_ALLOCATOR=ON" "-DUSE_MIMALLOC=OFF"] ++ (args.cmakeFlags or [ "-DSTAGE=1" "-DPREV_STAGE=./faux-prev-stage" "-DUSE_GITHASH=OFF" "-DCADICAL=${cadical}/bin/cadical" ]) ++ (args.extraCMakeFlags or extraCMakeFlags) ++ lib.optional (args.debug or debug) [ "-DCMAKE_BUILD_TYPE=Debug" ];
cmakeFlags = (args.cmakeFlags or [ "-DSTAGE=1" "-DPREV_STAGE=./faux-prev-stage" "-DUSE_GITHASH=OFF" "-DCADICAL=${cadical}/bin/cadical" ]) ++ (args.extraCMakeFlags or extraCMakeFlags) ++ lib.optional (args.debug or debug) [ "-DCMAKE_BUILD_TYPE=Debug" ];
preConfigure = args.preConfigure or "" + ''
# ignore absence of submodule
sed -i 's!lake/Lake.lean!!' CMakeLists.txt

View File

@@ -1,6 +0,0 @@
**Breaking Changes**
* The functions `Lean.Environment.importModules` and `Lean.Environment.finalizeImport` have been extended with a new parameter `loadExts : Bool := false` that enables environment extension state loading.
Their previous behavior corresponds to setting the flag to `true` but is only safe to do in combination with `enableInitializersExecution`; see also the `importModules` docstring.
The new default value `false` ensures the functions can be used correctly multiple times within the same process when environment extension access is not needed.
The wrapper function `Lean.Environment.withImportModules` now always calls `importModules` with `loadExts := false` as it is incompatible with extension loading.

View File

@@ -1,70 +0,0 @@
import Lean.Data.Lsp
open Lean
open Lean.Lsp
open Lean.JsonRpc
/-!
Tests language server memory use by repeatedly re-elaborate a given file.
NOTE: only works on Linux for now.
HACK: The line that is to be prepended with a space is hard-coded below to be sufficiently far down
not to touch the imports for usual files.
-/
def main (args : List String) : IO Unit := do
let leanCmd :: file :: iters :: args := args | panic! "usage: script <lean> <file> <#iterations> <server-args>..."
let uri := s!"file:///{file}"
Ipc.runWith leanCmd (#["--worker", "-DstderrAsMessages=false"] ++ args ++ #[uri]) do
-- for use with heaptrack:
--Ipc.runWith "heaptrack" (#[leanCmd, "--worker", "-DstderrAsMessages=false"] ++ args ++ #[uri]) do
-- -- heaptrack has no quiet mode??
-- let _ ← (← Ipc.stdout).getLine
-- let _ ← (← Ipc.stdout).getLine
let capabilities := {
textDocument? := some {
completion? := some {
completionItem? := some {
insertReplaceSupport? := true
}
}
}
}
Ipc.writeRequest 0, "initialize", { capabilities : InitializeParams }
let text IO.FS.readFile file
let mut requestNo : Nat := 1
let mut versionNo : Nat := 1
Ipc.writeNotification "textDocument/didOpen", {
textDocument := { uri := uri, languageId := "lean", version := 1, text := text } : DidOpenTextDocumentParams }
for i in [0:iters.toNat!] do
if i > 0 then
versionNo := versionNo + 1
let pos := { line := 19, character := 0 }
let params : DidChangeTextDocumentParams := {
textDocument := {
uri := uri
version? := versionNo
}
contentChanges := #[TextDocumentContentChangeEvent.rangeChange {
start := pos
«end» := pos
} " "]
}
let params := toJson params
Ipc.writeNotification "textDocument/didChange", params
requestNo := requestNo + 1
let diags Ipc.collectDiagnostics requestNo uri versionNo
if let some diags := diags then
for diag in diags.param.diagnostics do
IO.eprintln diag.message
requestNo := requestNo + 1
let status IO.FS.readFile s!"/proc/{(← read).pid}/status"
for line in status.splitOn "\n" |>.filter (·.startsWith "RssAnon") do
IO.eprintln line
let _ Ipc.collectDiagnostics requestNo uri versionNo
( Ipc.stdin).writeLspMessage (Message.notification "exit" none)
discard <| Ipc.waitForExit

View File

@@ -1,167 +0,0 @@
#!/usr/bin/env python3
"""
Merge a tag into a branch on a GitHub repository.
This script checks if a specified tag can be merged cleanly into a branch and performs
the merge if possible. If the merge cannot be done cleanly, it prints a helpful message.
Usage:
python3 merge_remote.py <org/repo> <branch> <tag>
Arguments:
org/repo: GitHub repository in the format 'organization/repository'
branch: The target branch to merge into
tag: The tag to merge from
Example:
python3 merge_remote.py leanprover/mathlib4 stable v4.6.0
The script uses the GitHub CLI (`gh`), so make sure it's installed and authenticated.
"""
import argparse
import subprocess
import sys
import tempfile
import os
import shutil
def run_command(command, check=True, capture_output=True):
"""Run a shell command and return the result."""
try:
result = subprocess.run(
command,
check=check,
shell=True,
text=True,
capture_output=capture_output
)
return result
except subprocess.CalledProcessError as e:
if capture_output:
print(f"Command failed: {command}")
print(f"Error: {e.stderr}")
return e
def clone_repo(repo, temp_dir):
"""Clone the repository to a temporary directory using shallow clone."""
print(f"Shallow cloning {repo}...")
# Keep the shallow clone for efficiency
clone_result = run_command(f"gh repo clone {repo} {temp_dir} -- --depth=1", check=False)
if clone_result.returncode != 0:
print(f"Failed to clone repository {repo}.")
print(f"Error: {clone_result.stderr}")
return False
return True
def check_and_merge(repo, branch, tag, temp_dir):
"""Check if tag can be merged into branch and perform the merge if possible."""
# Change to the temporary directory
os.chdir(temp_dir)
# First fetch the specific remote branch with its history
print(f"Fetching branch '{branch}'...")
fetch_branch = run_command(f"git fetch origin {branch}:refs/remotes/origin/{branch} --update-head-ok")
if fetch_branch.returncode != 0:
print(f"Error: Failed to fetch branch '{branch}'.")
return False
# Then fetch the specific tag
print(f"Fetching tag '{tag}'...")
fetch_tag = run_command(f"git fetch origin tag {tag}")
if fetch_tag.returncode != 0:
print(f"Error: Failed to fetch tag '{tag}'.")
return False
# Check if branch exists now that we've fetched it
branch_check = run_command(f"git branch -r | grep origin/{branch}")
if branch_check.returncode != 0:
print(f"Error: Branch '{branch}' does not exist in repository.")
return False
# Check if tag exists
tag_check = run_command(f"git tag -l {tag}")
if tag_check.returncode != 0 or not tag_check.stdout.strip():
print(f"Error: Tag '{tag}' does not exist in repository.")
return False
# Checkout the branch
print(f"Checking out branch '{branch}'...")
checkout_result = run_command(f"git checkout -b {branch} origin/{branch}")
if checkout_result.returncode != 0:
return False
# Try merging the tag in a dry-run to check if it can be merged cleanly
print(f"Checking if {tag} can be merged cleanly into {branch}...")
merge_check = run_command(f"git merge --no-commit --no-ff {tag}", check=False)
if merge_check.returncode != 0:
print(f"Cannot merge {tag} cleanly into {branch}.")
print("Merge conflicts would occur. Aborting merge.")
run_command("git merge --abort")
return False
# Abort the test merge
run_command("git reset --hard HEAD")
# Now perform the actual merge and push to remote
print(f"Merging {tag} into {branch}...")
merge_result = run_command(f"git merge {tag} --no-edit")
if merge_result.returncode != 0:
print(f"Failed to merge {tag} into {branch}.")
return False
print(f"Pushing changes to remote...")
push_result = run_command(f"git push origin {branch}")
if push_result.returncode != 0:
print(f"Failed to push changes to remote.")
return False
print(f"Successfully merged {tag} into {branch} and pushed to remote.")
return True
def main():
parser = argparse.ArgumentParser(
description="Merge a tag into a branch on a GitHub repository.",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
%(prog)s leanprover/mathlib4 stable v4.6.0 Merge tag v4.6.0 into stable branch
The script will:
1. Clone the repository
2. Check if the tag and branch exist
3. Check if the tag can be merged cleanly into the branch
4. Perform the merge and push to remote if possible
"""
)
parser.add_argument("repo", help="GitHub repository in the format 'organization/repository'")
parser.add_argument("branch", help="The target branch to merge into")
parser.add_argument("tag", help="The tag to merge from")
args = parser.parse_args()
# Create a temporary directory for the repository
temp_dir = tempfile.mkdtemp()
try:
# Clone the repository
if not clone_repo(args.repo, temp_dir):
sys.exit(1)
# Check if the tag can be merged and perform the merge
if not check_and_merge(args.repo, args.branch, args.tag, temp_dir):
sys.exit(1)
finally:
# Clean up the temporary directory
print(f"Cleaning up temporary files...")
shutil.rmtree(temp_dir)
if __name__ == "__main__":
main()

View File

@@ -7,13 +7,6 @@ import base64
import subprocess
import sys
import os
# Import run_command from merge_remote.py
from merge_remote import run_command
def debug(verbose, message):
"""Print debug message if verbose mode is enabled."""
if verbose:
print(f" [DEBUG] {message}")
def parse_repos_config(file_path):
with open(file_path, "r") as f:
@@ -92,12 +85,9 @@ def parse_version(version_str):
def is_version_gte(version1, version2):
"""Check if version1 >= version2, including proper handling of release candidates."""
# Check if version1 is a nightly toolchain
if version1.startswith("leanprover/lean4:nightly-"):
return False
return parse_version(version1) >= parse_version(version2)
def is_merged_into_stable(repo_url, tag_name, stable_branch, github_token, verbose=False):
def is_merged_into_stable(repo_url, tag_name, stable_branch, github_token):
# First get the commit SHA for the tag
api_base = repo_url.replace("https://github.com/", "https://api.github.com/repos/")
headers = {'Authorization': f'token {github_token}'} if github_token else {}
@@ -105,7 +95,6 @@ def is_merged_into_stable(repo_url, tag_name, stable_branch, github_token, verbo
# Get tag's commit SHA
tag_response = requests.get(f"{api_base}/git/refs/tags/{tag_name}", headers=headers)
if tag_response.status_code != 200:
debug(verbose, f"Could not fetch tag {tag_name}, status code: {tag_response.status_code}")
return False
# Handle both single object and array responses
@@ -114,48 +103,22 @@ def is_merged_into_stable(repo_url, tag_name, stable_branch, github_token, verbo
# Find the exact matching tag in the list
matching_tags = [tag for tag in tag_data if tag['ref'] == f'refs/tags/{tag_name}']
if not matching_tags:
debug(verbose, f"No matching tag found for {tag_name} in response list")
return False
tag_sha = matching_tags[0]['object']['sha']
else:
tag_sha = tag_data['object']['sha']
# Check if the tag is an annotated tag and get the actual commit SHA
if tag_data.get('object', {}).get('type') == 'tag' or (
isinstance(tag_data, list) and
matching_tags and
matching_tags[0].get('object', {}).get('type') == 'tag'):
# Get the commit that this tag points to
tag_obj_response = requests.get(f"{api_base}/git/tags/{tag_sha}", headers=headers)
if tag_obj_response.status_code == 200:
tag_obj = tag_obj_response.json()
if 'object' in tag_obj and tag_obj['object']['type'] == 'commit':
commit_sha = tag_obj['object']['sha']
debug(verbose, f"Tag is annotated. Resolved commit SHA: {commit_sha}")
tag_sha = commit_sha # Use the actual commit SHA
# Get commits on stable branch containing this SHA
commits_response = requests.get(
f"{api_base}/commits?sha={stable_branch}&per_page=100",
headers=headers
)
if commits_response.status_code != 200:
debug(verbose, f"Could not fetch commits for branch {stable_branch}, status code: {commits_response.status_code}")
return False
# Check if any commit in stable's history matches our tag's SHA
stable_commits = [commit['sha'] for commit in commits_response.json()]
is_merged = tag_sha in stable_commits
debug(verbose, f"Tag SHA: {tag_sha}")
debug(verbose, f"First 5 stable commits: {stable_commits[:5]}")
debug(verbose, f"Total stable commits fetched: {len(stable_commits)}")
if not is_merged:
debug(verbose, f"Tag SHA not found in first {len(stable_commits)} commits of stable branch")
return is_merged
return tag_sha in stable_commits
def is_release_candidate(version):
return "-rc" in version
@@ -215,75 +178,51 @@ def check_bump_branch_toolchain(url, bump_branch, github_token):
print(f" ✅ Bump branch correctly uses toolchain: {content}")
return True
def pr_exists_with_title(repo_url, title, github_token):
api_url = repo_url.replace("https://github.com/", "https://api.github.com/repos/") + "/pulls"
headers = {'Authorization': f'token {github_token}'} if github_token else {}
params = {'state': 'open'}
response = requests.get(api_url, headers=headers, params=params)
if response.status_code != 200:
return None
pull_requests = response.json()
for pr in pull_requests:
if pr['title'] == title:
return pr['number'], pr['html_url']
return None
def main():
parser = argparse.ArgumentParser(description="Check release status of Lean4 repositories")
parser.add_argument("toolchain", help="The toolchain version to check (e.g., v4.6.0)")
parser.add_argument("--verbose", "-v", action="store_true", help="Enable verbose debugging output")
parser.add_argument("--dry-run", action="store_true", help="Dry run mode (no actions taken)")
args = parser.parse_args()
github_token = get_github_token()
toolchain = args.toolchain
verbose = args.verbose
# dry_run = args.dry_run # Not used yet but available for future implementation
if len(sys.argv) != 2:
print("Usage: python3 release_checklist.py <toolchain>")
sys.exit(1)
toolchain = sys.argv[1]
stripped_toolchain = strip_rc_suffix(toolchain)
lean_repo_url = "https://github.com/leanprover/lean4"
# Track repository status
repo_status = {} # Will store True for success, False for failure
# Preliminary checks for lean4 itself
# Preliminary checks
print("\nPerforming preliminary checks...")
lean4_success = True
# Check for branch releases/v4.Y.0
version_major, version_minor, _ = map(int, stripped_toolchain.lstrip('v').split('.'))
branch_name = f"releases/v{version_major}.{version_minor}.0"
if not branch_exists(lean_repo_url, branch_name, github_token):
print(f" ❌ Branch {branch_name} does not exist")
lean4_success = False
else:
if branch_exists(lean_repo_url, branch_name, github_token):
print(f" ✅ Branch {branch_name} exists")
# Check CMake version settings
if not check_cmake_version(lean_repo_url, branch_name, version_major, version_minor, github_token):
lean4_success = False
# Check for tag and release page
if not tag_exists(lean_repo_url, toolchain, github_token):
print(f" ❌ Tag {toolchain} does not exist.")
lean4_success = False
check_cmake_version(lean_repo_url, branch_name, version_major, version_minor, github_token)
else:
print(f" ❌ Branch {branch_name} does not exist")
# Check for tag v4.X.Y(-rcZ)
if tag_exists(lean_repo_url, toolchain, github_token):
print(f" ✅ Tag {toolchain} exists")
if not release_page_exists(lean_repo_url, toolchain, github_token):
print(f" ❌ Release page for {toolchain} does not exist")
lean4_success = False
else:
print(f" ❌ Tag {toolchain} does not exist.")
# Check for release page
if release_page_exists(lean_repo_url, toolchain, github_token):
print(f" ✅ Release page for {toolchain} exists")
# Check the first line of the release notes
release_notes = get_release_notes(lean_repo_url, toolchain, github_token)
if not (release_notes and toolchain in release_notes.splitlines()[0].strip()):
if release_notes and toolchain in release_notes.splitlines()[0].strip():
print(f" ✅ Release notes look good.")
else:
previous_minor_version = version_minor - 1
previous_release = f"v{version_major}.{previous_minor_version}.0"
print(f" ❌ Release notes not published. Please run `script/release_notes.py --since {previous_release}` on branch `{branch_name}`.")
lean4_success = False
else:
print(f" ✅ Release notes look good.")
repo_status["lean4"] = lean4_success
else:
print(f" ❌ Release page for {toolchain} does not exist")
# Load repositories and perform further checks
print("\nChecking repositories...")
@@ -294,100 +233,50 @@ def main():
for repo in repos:
name = repo["name"]
url = repo["url"]
org_repo = extract_org_repo_from_url(url)
branch = repo["branch"]
check_stable = repo["stable-branch"]
check_tag = repo.get("toolchain-tag", True)
check_bump = repo.get("bump-branch", False)
dependencies = repo.get("dependencies", [])
print(f"\nRepository: {name}")
# Check if any dependencies have failed
failed_deps = [dep for dep in dependencies if dep in repo_status and not repo_status[dep]]
if failed_deps:
print(f" 🟡 Dependencies not ready: {', '.join(failed_deps)}")
repo_status[name] = False
continue
# Initialize success flag for this repo
success = True
# Check if branch is on at least the target toolchain
lean_toolchain_content = get_branch_content(url, branch, "lean-toolchain", github_token)
if lean_toolchain_content is None:
print(f" ❌ No lean-toolchain file found in {branch} branch")
repo_status[name] = False
continue
on_target_toolchain = is_version_gte(lean_toolchain_content.strip(), toolchain)
if not on_target_toolchain:
print(f" ❌ Not on target toolchain (needs ≥ {toolchain}, but {branch} is on {lean_toolchain_content.strip()})")
pr_title = f"chore: bump toolchain to {toolchain}"
pr_info = pr_exists_with_title(url, pr_title, github_token)
if pr_info:
pr_number, pr_url = pr_info
print(f" ✅ PR with title '{pr_title}' exists: #{pr_number} ({pr_url})")
else:
print(f" ❌ PR with title '{pr_title}' does not exist")
print(f" Run `script/release_steps.py {toolchain} {name}` to create it")
repo_status[name] = False
continue
print(f" ✅ On compatible toolchain (>= {toolchain})")
# Only check for tag if toolchain-tag is true
if check_tag:
tag_exists_initially = tag_exists(url, toolchain, github_token)
if not tag_exists_initially:
if args.dry_run:
print(f" Tag {toolchain} does not exist. Run `script/push_repo_release_tag.py {org_repo} {branch} {toolchain}`.")
repo_status[name] = False
continue
else:
print(f" … Tag {toolchain} does not exist. Running `script/push_repo_release_tag.py {org_repo} {branch} {toolchain}`...")
# Run the script to create the tag
subprocess.run(["script/push_repo_release_tag.py", org_repo, branch, toolchain])
# Check again if the tag exists now
if not tag_exists(url, toolchain, github_token):
print(f" ❌ Manual intervention required.")
repo_status[name] = False
continue
# This will print in all successful cases - whether tag existed initially or was created successfully
print(f" ✅ Tag {toolchain} exists")
if not tag_exists(url, toolchain, github_token):
print(f" ❌ Tag {toolchain} does not exist. Run `script/push_repo_release_tag.py {extract_org_repo_from_url(url)} {branch} {toolchain}`.")
else:
print(f" Tag {toolchain} exists")
# Only check merging into stable if stable-branch is true and not a release candidate
if check_stable and not is_release_candidate(toolchain):
if not is_merged_into_stable(url, toolchain, "stable", github_token, verbose):
org_repo = extract_org_repo_from_url(url)
if not is_merged_into_stable(url, toolchain, "stable", github_token):
print(f" ❌ Tag {toolchain} is not merged into stable")
print(f" Run `script/merge_remote.py {org_repo} stable {toolchain}` to merge it")
repo_status[name] = False
continue
print(f" ✅ Tag {toolchain} is merged into stable")
else:
print(f" ✅ Tag {toolchain} is merged into stable")
# Check for bump branch if configured
if check_bump:
next_version = get_next_version(toolchain)
bump_branch = f"bump/{next_version}"
if not branch_exists(url, bump_branch, github_token):
if args.dry_run:
print(f" ❌ Bump branch {bump_branch} does not exist. Run `gh api -X POST /repos/{org_repo}/git/refs -f ref=refs/heads/{bump_branch} -f sha=$(gh api /repos/{org_repo}/git/refs/heads/{branch} --jq .object.sha)` to create it.")
repo_status[name] = False
continue
print(f" … Bump branch {bump_branch} does not exist. Creating it...")
result = run_command(f"gh api -X POST /repos/{org_repo}/git/refs -f ref=refs/heads/{bump_branch} -f sha=$(gh api /repos/{org_repo}/git/refs/heads/{branch} --jq .object.sha)", check=False)
if result.returncode != 0:
print(f" ❌ Failed to create bump branch {bump_branch}")
repo_status[name] = False
continue
print(f" ✅ Bump branch {bump_branch} exists")
if not check_bump_branch_toolchain(url, bump_branch, github_token):
repo_status[name] = False
continue
if branch_exists(url, bump_branch, github_token):
print(f" ✅ Bump branch {bump_branch} exists")
check_bump_branch_toolchain(url, bump_branch, github_token)
else:
print(f" ❌ Bump branch {bump_branch} does not exist")
repo_status[name] = success
# Final check for lean4 master branch
# Check lean4 master branch for next development cycle
print("\nChecking lean4 master branch configuration...")
next_version = get_next_version(toolchain)
next_minor = int(next_version.split('.')[1])

View File

@@ -63,9 +63,7 @@ repositories:
toolchain-tag: true
stable-branch: false
branch: main
dependencies:
- Cli
- Batteries
dependencies: []
- name: plausible
url: https://github.com/leanprover-community/plausible
@@ -87,7 +85,6 @@ repositories:
- Batteries
- doc-gen4
- import-graph
- plausible
- name: REPL
url: https://github.com/leanprover-community/repl

View File

@@ -1,154 +0,0 @@
#!/usr/bin/env python3
"""
Generate release steps script for Lean4 repositories.
This script helps automate the release process for Lean4 and its dependent repositories
by generating step-by-step instructions for updating toolchains, creating tags,
and managing branches.
Usage:
python3 release_steps.py <version> <repo>
Arguments:
version: The version to set in the lean-toolchain file (e.g., v4.6.0)
repo: A substring of the repository name as specified in release_repos.yml
Example:
python3 release_steps.py v4.6.0 mathlib
python3 release_steps.py v4.6.0 batt
The script reads repository configurations from release_repos.yml in the same directory.
Each repository may have specific requirements for:
- Branch management
- Toolchain updates
- Dependency updates
- Tagging conventions
- Stable branch handling
"""
import argparse
import yaml
import os
import sys
import re
def load_repos_config(file_path):
    """Load the repository list from the YAML configuration file.

    Returns the value of the top-level ``repositories`` key.
    """
    with open(file_path, "r") as config_file:
        data = yaml.safe_load(config_file)
    return data["repositories"]
def find_repo(repo_substring, config):
    """Resolve a repository entry by a case-insensitive name substring.

    Exits the program with an error message when the substring matches
    zero repositories or more than one; otherwise returns the unique match.
    """
    pattern = re.compile(re.escape(repo_substring), re.IGNORECASE)
    matches = [repo for repo in config if pattern.search(repo["name"])]
    if len(matches) == 1:
        return matches[0]
    if matches:
        print(f"Error: Multiple repositories matching '{repo_substring}' found in configuration: {', '.join(r['name'] for r in matches)}")
    else:
        print(f"Error: No repository matching '{repo_substring}' found in configuration.")
    sys.exit(1)
def generate_script(repo, version, config):
    """Build the shell commands for releasing one repository at ``version``.

    Parameters:
        repo: substring identifying the repository (resolved via ``find_repo``).
        version: toolchain version to release, e.g. ``v4.6.0``.
        config: list of repository dictionaries from ``release_repos.yml``.

    Returns:
        The release steps as a single newline-joined shell script string.
    """
    repo_config = find_repo(repo, config)
    repo_name = repo_config['name']
    repo_url = repo_config['url']
    # Extract the last component of the URL, removing the .git extension if present
    repo_dir = repo_url.split('/')[-1].replace('.git', '')
    default_branch = repo_config.get("branch", "main")
    dependencies = repo_config.get("dependencies", [])
    requires_tagging = repo_config.get("toolchain-tag", True)
    has_stable_branch = repo_config.get("stable-branch", True)

    script_lines = [
        f"cd {repo_dir}",
        "git fetch",
        f"git checkout {default_branch} && git pull",
        f"git checkout -b bump_to_{version}",
        f"echo leanprover/lean4:{version} > lean-toolchain",
    ]

    # Special cases for specific repositories
    if repo_name == "REPL":
        # REPL pins a Mathlib revision in its test suite; the pinned `rev` and
        # the test toolchain must track the release as well.
        script_lines.extend([
            "lake update",
            "cd test/Mathlib",
            f"perl -pi -e 's/rev = \"v\\d+\\.\\d+\\.\\d+(-rc\\d+)?\"/rev = \"{version}\"/g' lakefile.toml",
            f"echo leanprover/lean4:{version} > lean-toolchain",
            "lake update",
            "cd ../..",
            "./test.sh"
        ])
    elif dependencies:
        script_lines.append('echo "Please update the dependencies in lakefile.{lean,toml}"')
        script_lines.append("lake update")

    script_lines.append("")
    script_lines.extend([
        f'git commit -am "chore: bump toolchain to {version}"',
        ""
    ])

    # Release candidates of repos with nightly-testing infrastructure need the
    # matching bump branch merged in before the PR.
    if re.search(r'rc\d+$', version) and repo_name in ["Batteries", "Mathlib"]:
        script_lines.extend([
            "echo 'This repo has nightly-testing infrastructure'",
            f"git merge origin/bump/{version.split('-rc')[0]}",
            "echo 'Please resolve any conflicts.'",
            ""
        ])

    # Build and test locally, except for Mathlib.
    if repo_name != "Mathlib":
        script_lines.extend([
            "lake build && if lake check-test; then lake test; fi",
            ""
        ])

    script_lines.extend([
        'gh pr create --title "chore: bump toolchain to ' + version + '" --body ""',
        "echo 'Please review the PR and merge it.'",
        ""
    ])

    # Special cases for specific repositories
    if repo_name == "ProofWidgets4":
        # ProofWidgets4 uses its own version scheme, so no automatic tag here.
        # (Fixed: this literal had a pointless f-string prefix with no placeholders.)
        script_lines.append("echo 'Note: Follow the version convention of the repository for tagging.'")
    elif requires_tagging:
        script_lines.append(f"git checkout {default_branch} && git pull")
        # Only tag when lean-toolchain already contains the expected version,
        # i.e. after the bump PR has been merged.
        script_lines.append(f'[ "$(cat lean-toolchain)" = "leanprover/lean4:{version}" ] && git tag -a {version} -m \'Release {version}\' && git push origin --tags || echo "Error: lean-toolchain does not contain expected version {version}"')
        if has_stable_branch:
            # Advance the stable branch to the freshly created release tag.
            script_lines.extend([
                "git checkout stable && git pull",
                f"git merge {version} --no-edit",
                "git push origin stable"
            ])

    return "\n".join(script_lines)
def main():
    """Command-line entry point: print the release steps for one repository."""
    parser = argparse.ArgumentParser(
        description="Generate release steps script for Lean4 repositories.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s v4.6.0 mathlib Generate steps for updating Mathlib to v4.6.0
  %(prog)s v4.6.0 batt Generate steps for updating Batteries to v4.6.0

The script will generate shell commands to:
1. Update the lean-toolchain file
2. Create appropriate branches and commits
3. Create pull requests
4. Create version tags
5. Update stable branches where applicable"""
    )
    parser.add_argument("version", help="The version to set in the lean-toolchain file (e.g., v4.6.0)")
    parser.add_argument("repo", help="A substring of the repository name as specified in release_repos.yml")
    args = parser.parse_args()

    # release_repos.yml is expected to live next to this script.
    config_path = os.path.join(os.path.dirname(__file__), "release_repos.yml")
    repositories = load_repos_config(config_path)
    print(generate_script(args.repo, args.version, repositories))


if __name__ == "__main__":
    main()

View File

@@ -10,7 +10,7 @@ endif()
include(ExternalProject)
project(LEAN CXX C)
set(LEAN_VERSION_MAJOR 4)
set(LEAN_VERSION_MINOR 20)
set(LEAN_VERSION_MINOR 19)
set(LEAN_VERSION_PATCH 0)
set(LEAN_VERSION_IS_RELEASE 0) # This number is 1 in the release revision, and 0 otherwise.
set(LEAN_SPECIAL_VERSION_DESC "" CACHE STRING "Additional version description like 'nightly-2018-03-11'")
@@ -20,11 +20,6 @@ if (LEAN_SPECIAL_VERSION_DESC)
elseif (NOT LEAN_VERSION_IS_RELEASE)
string(APPEND LEAN_VERSION_STRING "-pre")
endif()
if (LEAN_VERSION_IS_RELEASE)
set(LEAN_MANUAL_ROOT "https://lean-lang.org/doc/reference/${LEAN_VERSION_STRING}/")
else()
set(LEAN_MANUAL_ROOT "")
endif()
set(LEAN_PLATFORM_TARGET "" CACHE STRING "LLVM triple of the target platform")
if (NOT LEAN_PLATFORM_TARGET)
@@ -74,13 +69,12 @@ option(TRACK_LIVE_EXPRS "TRACK_LIVE_EXPRS" OFF)
option(CUSTOM_ALLOCATORS "CUSTOM_ALLOCATORS" ON)
option(SAVE_SNAPSHOT "SAVE_SNAPSHOT" ON)
option(SAVE_INFO "SAVE_INFO" ON)
option(SMALL_ALLOCATOR "SMALL_ALLOCATOR" OFF)
option(SMALL_ALLOCATOR "SMALL_ALLOCATOR" ON)
option(MMAP "MMAP" ON)
option(LAZY_RC "LAZY_RC" OFF)
option(RUNTIME_STATS "RUNTIME_STATS" OFF)
option(BSYMBOLIC "Link with -Bsymbolic to reduce call overhead in shared libraries (Linux)" ON)
option(USE_GMP "USE_GMP" ON)
option(USE_MIMALLOC "use mimalloc" ON)
# development-specific options
option(CHECK_OLEAN_VERSION "Only load .olean files compiled with the current version of Lean" OFF)
@@ -93,11 +87,6 @@ if ("${LAZY_RC}" MATCHES "ON")
set(LEAN_LAZY_RC "#define LEAN_LAZY_RC")
endif()
if (USE_MIMALLOC)
set(SMALL_ALLOCATOR OFF)
set(LEAN_MIMALLOC "#define LEAN_MIMALLOC")
endif()
if ("${SMALL_ALLOCATOR}" MATCHES "ON")
set(LEAN_SMALL_ALLOCATOR "#define LEAN_SMALL_ALLOCATOR")
endif()
@@ -554,9 +543,6 @@ else()
set(LEAN_IS_STAGE0 "#define LEAN_IS_STAGE0 0")
endif()
configure_file("${LEAN_SOURCE_DIR}/config.h.in" "${LEAN_BINARY_DIR}/include/lean/config.h")
if (USE_MIMALLOC)
file(COPY "${LEAN_BINARY_DIR}/../mimalloc/src/mimalloc/include/mimalloc.h" DESTINATION "${LEAN_BINARY_DIR}/include/lean")
endif()
install(DIRECTORY ${LEAN_BINARY_DIR}/include/ DESTINATION include)
configure_file(${LEAN_SOURCE_DIR}/lean.mk.in ${LEAN_BINARY_DIR}/share/lean/lean.mk)
install(DIRECTORY ${LEAN_BINARY_DIR}/share/ DESTINATION share)

View File

@@ -29,11 +29,8 @@ namespace EStateM
variable {ε σ α β : Type u}
/--
Alternative orElse operator that allows callers to select which exception should be used when both
operations fail. The default is to use the first exception since the standard `orElse` uses the
second.
-/
/-- Alternative orElse operator that allows to select which exception should be used.
The default is to use the first exception since the standard `orElse` uses the second. -/
@[always_inline, inline]
protected def orElse' {δ} [Backtrackable δ σ] (x₁ x₂ : EStateM ε σ α) (useFirstEx := true) : EStateM ε σ α := fun s =>
let d := Backtrackable.save s;
@@ -57,11 +54,6 @@ instance : MonadFinally (EStateM ε σ) := {
| Result.error e₂ s => Result.error e₂ s
}
/--
Converts a state monad action into a state monad action with exceptions.
The resulting action does not throw an exception.
-/
@[always_inline, inline] def fromStateM {ε σ α : Type} (x : StateM σ α) : EStateM ε σ α := fun s =>
match x.run s with
| (a, s') => EStateM.Result.ok a s'

View File

@@ -306,10 +306,6 @@ syntax (name := first) "first " withPosition((ppDedent(ppLine) colGe "| " convSe
/-- `try tac` runs `tac` and succeeds even if `tac` failed. -/
macro "try " t:convSeq : conv => `(conv| first | $t | skip)
/--
`tac <;> tac'` runs `tac` on the main goal and `tac'` on each produced goal, concatenating all goals
produced by `tac'`.
-/
macro:1 x:conv tk:" <;> " y:conv:0 : conv =>
`(conv| tactic' => (conv' => $x:conv) <;>%$tk (conv' => $y:conv))

View File

@@ -3893,7 +3893,7 @@ theorem all_map {xs : Array α} {p : β → Bool} : (xs.map f).all p = xs.all (p
/-- Variant of `all_filter` with a side condition for the `stop` argument. -/
@[simp] theorem all_filter' {xs : Array α} {p q : α Bool} (w : stop = (xs.filter p).size) :
(xs.filter p).all q 0 stop = xs.all fun a => !(p a) || q a := by
(xs.filter p).all q 0 stop = xs.all fun a => p a q a := by
subst w
rcases xs with xs
rw [List.filter_toArray]
@@ -3904,7 +3904,7 @@ theorem any_filter {xs : Array α} {p q : α → Bool} :
simp
theorem all_filter {xs : Array α} {p q : α Bool} :
(xs.filter p).all q 0 = xs.all fun a => !(p a) || q a := by
(xs.filter p).all q 0 = xs.all fun a => p a q a := by
simp
/-- Variant of `any_filterMap` with a side condition for the `stop` argument. -/

View File

@@ -33,7 +33,7 @@ section Nat
instance natCastInst : NatCast (BitVec w) := BitVec.ofNat w
/-- Theorem for normalizing the bitvector literal representation. -/
/-- Theorem for normalizing the bit vector literal representation. -/
-- TODO: This needs more usage data to assess which direction the simp should go.
@[simp, bitvec_to_nat] theorem ofNat_eq_ofNat : @OfNat.ofNat (BitVec n) i _ = .ofNat n i := rfl
@@ -48,7 +48,7 @@ section subsingleton
instance : Subsingleton (BitVec 0) where
allEq := by intro 0, _ 0, _; rfl
/-- The empty bitvector. -/
/-- The empty bitvector -/
abbrev nil : BitVec 0 := 0
/-- Every bitvector of length 0 is equal to `nil`, i.e., there is only one empty bitvector -/
@@ -58,11 +58,11 @@ end subsingleton
section zero_allOnes
/-- Returns a bitvector of size `n` where all bits are `0`. -/
/-- Return a bitvector `0` of size `n`. This is the bitvector with all zero bits. -/
protected def zero (n : Nat) : BitVec n := .ofNatLT 0 (Nat.two_pow_pos n)
instance : Inhabited (BitVec n) where default := .zero n
/-- Returns a bitvector of size `n` where all bits are `1`. -/
/-- Bit vector of size `n` where all bits are `1`s -/
def allOnes (n : Nat) : BitVec n :=
.ofNatLT (2^n - 1) (Nat.le_of_eq (Nat.sub_add_cancel (Nat.two_pow_pos n)))
@@ -71,36 +71,36 @@ end zero_allOnes
section getXsb
/--
Returns the `i`th least significant bit.
Return the `i`-th least significant bit.
This will be renamed `getLsb` after the existing deprecated alias is removed.
-/
@[inline] def getLsb' (x : BitVec w) (i : Fin w) : Bool := x.toNat.testBit i
/-- Returns the `i`th least significant bit, or `none` if `i ≥ w`. -/
/-- Return the `i`-th least significant bit or `none` if `i ≥ w`. -/
@[inline] def getLsb? (x : BitVec w) (i : Nat) : Option Bool :=
if h : i < w then some (getLsb' x i, h) else none
/--
Returns the `i`th most significant bit.
Return the `i`-th most significant bit.
This will be renamed `BitVec.getMsb` after the existing deprecated alias is removed.
This will be renamed `getMsb` after the existing deprecated alias is removed.
-/
@[inline] def getMsb' (x : BitVec w) (i : Fin w) : Bool := x.getLsb' w-1-i, by omega
/-- Returns the `i`th most significant bit or `none` if `i ≥ w`. -/
/-- Return the `i`-th most significant bit or `none` if `i ≥ w`. -/
@[inline] def getMsb? (x : BitVec w) (i : Nat) : Option Bool :=
if h : i < w then some (getMsb' x i, h) else none
/-- Returns the `i`th least significant bit or `false` if `i ≥ w`. -/
/-- Return the `i`-th least significant bit or `false` if `i ≥ w`. -/
@[inline] def getLsbD (x : BitVec w) (i : Nat) : Bool :=
x.toNat.testBit i
/-- Returns the `i`th most significant bit, or `false` if `i ≥ w`. -/
/-- Return the `i`-th most significant bit or `false` if `i ≥ w`. -/
@[inline] def getMsbD (x : BitVec w) (i : Nat) : Bool :=
i < w && x.getLsbD (w-1-i)
/-- Returns the most significant bit in a bitvector. -/
/-- Return most-significant bit in bitvector. -/
@[inline] protected def msb (x : BitVec n) : Bool := getMsbD x 0
end getXsb
@@ -129,22 +129,14 @@ end getElem
section Int
/--
Interprets the bitvector as an integer stored in two's complement form.
-/
/-- Interpret the bitvector as an integer stored in two's complement form. -/
protected def toInt (x : BitVec n) : Int :=
if 2 * x.toNat < 2^n then
x.toNat
else
(x.toNat : Int) - (2^n : Nat)
/--
Converts an integer to its two's complement representation as a bitvector of the given width `n`,
over- and underflowing as needed.
The underlying `Nat` is `(2^n + (i mod 2^n)) mod 2^n`. Converting the bitvector back to an `Int`
with `BitVec.toInt` results in the value `i.bmod (2^n)`.
-/
/-- The `BitVec` with value `(2^n + (i mod 2^n)) mod 2^n`. -/
protected def ofInt (n : Nat) (i : Int) : BitVec n := .ofNatLT (i % (Int.ofNat (2^n))).toNat (by
apply (Int.toNat_lt _).mpr
· apply Int.emod_lt_of_pos
@@ -160,7 +152,7 @@ end Int
section Syntax
/-- Notation for bitvector literals. `i#n` is a shorthand for `BitVec.ofNat n i`. -/
/-- Notation for bit vector literals. `i#n` is a shorthand for `BitVec.ofNat n i`. -/
syntax:max num noWs "#" noWs term:max : term
macro_rules | `($i:num#$n) => `(BitVec.ofNat $n $i)
@@ -169,16 +161,16 @@ recommended_spelling "zero" for "0#n" in [BitVec.ofNat, «term__#__»]
/-- not `ofNat_one` -/
recommended_spelling "one" for "1#n" in [BitVec.ofNat, «term__#__»]
/-- Unexpander for bitvector literals. -/
/-- Unexpander for bit vector literals. -/
@[app_unexpander BitVec.ofNat] def unexpandBitVecOfNat : Lean.PrettyPrinter.Unexpander
| `($(_) $n $i:num) => `($i:num#$n)
| _ => throw ()
/-- Notation for bitvector literals without truncation. `i#'lt` is a shorthand for `BitVec.ofNatLT i lt`. -/
/-- Notation for bit vector literals without truncation. `i#'lt` is a shorthand for `BitVec.ofNatLT i lt`. -/
scoped syntax:max term:max noWs "#'" noWs term:max : term
macro_rules | `($i#'$p) => `(BitVec.ofNatLT $i $p)
/-- Unexpander for bitvector literals without truncation. -/
/-- Unexpander for bit vector literals without truncation. -/
@[app_unexpander BitVec.ofNatLT] def unexpandBitVecOfNatLt : Lean.PrettyPrinter.Unexpander
| `($(_) $i $p) => `($i#'$p)
| _ => throw ()
@@ -187,11 +179,7 @@ end Syntax
section repr_toString
/--
Converts a bitvector into a fixed-width hexadecimal number with enough digits to represent it.
If `n` is `0`, then one digit is returned. Otherwise, `⌊(n + 3) / 4⌋` digits are returned.
-/
/-- Convert bitvector into a fixed-width hex number. -/
protected def toHex {n : Nat} (x : BitVec n) : String :=
let s := (Nat.toDigits 16 x.toNat).asString
let t := (List.replicate ((n+3) / 4 - s.length) '0').asString
@@ -205,8 +193,8 @@ end repr_toString
section arithmetic
/--
Negation of bitvectors. This can be interpreted as either signed or unsigned negation modulo `2^n`.
Usually accessed via the `-` prefix operator.
Negation for bit vectors. This can be interpreted as either signed or unsigned negation
modulo `2^n`.
SMT-LIB name: `bvneg`.
-/
@@ -214,13 +202,13 @@ protected def neg (x : BitVec n) : BitVec n := .ofNat n (2^n - x.toNat)
instance : Neg (BitVec n) := .neg
/--
Returns the absolute value of a signed bitvector.
Return the absolute value of a signed bitvector.
-/
protected def abs (x : BitVec n) : BitVec n := if x.msb then .neg x else x
/--
Multiplies two bitvectors. This can be interpreted as either signed or unsigned multiplication
modulo `2^n`. Usually accessed via the `*` operator.
Multiplication for bit vectors. This can be interpreted as either signed or unsigned
multiplication modulo `2^n`.
SMT-LIB name: `bvmul`.
-/
@@ -228,15 +216,14 @@ protected def mul (x y : BitVec n) : BitVec n := BitVec.ofNat n (x.toNat * y.toN
instance : Mul (BitVec n) := .mul
/--
Unsigned division of bitvectors using the Lean convention where division by zero returns zero.
Usually accessed via the `/` operator.
Unsigned division for bit vectors using the Lean convention where division by zero returns zero.
-/
def udiv (x y : BitVec n) : BitVec n :=
(x.toNat / y.toNat)#'(Nat.lt_of_le_of_lt (Nat.div_le_self _ _) x.isLt)
instance : Div (BitVec n) := .udiv
/--
Unsigned modulo for bitvectors. Usually accessed via the `%` operator.
Unsigned modulo for bit vectors.
SMT-LIB name: `bvurem`.
-/
@@ -245,23 +232,24 @@ def umod (x y : BitVec n) : BitVec n :=
instance : Mod (BitVec n) := .umod
/--
Unsigned division of bitvectors using the
[SMT-LIB convention](http://smtlib.cs.uiowa.edu/theories-FixedSizeBitVectors.shtml),
where division by zero returns `BitVector.allOnes n`.
Unsigned division for bit vectors using the
[SMT-LIB convention](http://smtlib.cs.uiowa.edu/theories-FixedSizeBitVectors.shtml)
where division by zero returns the `allOnes` bitvector.
SMT-LIB name: `bvudiv`.
-/
def smtUDiv (x y : BitVec n) : BitVec n := if y = 0 then allOnes n else udiv x y
/--
Signed T-division (using the truncating rounding convention) for bitvectors. This function obeys the
Lean convention that division by zero returns zero.
Signed t-division for bit vectors using the Lean convention where division
by zero returns zero.
Examples:
* `(7#4).sdiv 2 = 3#4`
* `(-9#4).sdiv 2 = -4#4`
* `(5#4).sdiv -2 = -2#4`
* `(-7#4).sdiv (-2) = 3#4`
```lean
sdiv 7#4 2 = 3#4
sdiv (-9#4) 2 = -4#4
sdiv 5#4 -2 = -2#4
sdiv (-7#4) (-2) = 3#4
```
-/
def sdiv (x y : BitVec n) : BitVec n :=
match x.msb, y.msb with
@@ -271,11 +259,9 @@ def sdiv (x y : BitVec n) : BitVec n :=
| true, true => udiv (.neg x) (.neg y)
/--
Signed division for bitvectors using the SMT-LIB using the
[SMT-LIB convention](http://smtlib.cs.uiowa.edu/theories-FixedSizeBitVectors.shtml),
where division by zero returns `BitVector.allOnes n`.
Signed division for bit vectors using SMT-LIB rules for division by zero.
Specifically, `x.smtSDiv 0 = if x >= 0 then -1 else 1`
Specifically, `smtSDiv x 0 = if x >= 0 then -1 else 1`
SMT-LIB name: `bvsdiv`.
-/
@@ -319,7 +305,7 @@ end arithmetic
section bool
/-- Turns a `Bool` into a bitvector of length `1`. -/
/-- Turn a `Bool` into a bitvector of length `1` -/
def ofBool (b : Bool) : BitVec 1 := cond b 1 0
@[simp] theorem ofBool_false : ofBool false = 0 := by trivial
@@ -333,32 +319,32 @@ end bool
section relations
/--
Unsigned less-than for bitvectors.
Unsigned less-than for bit vectors.
SMT-LIB name: `bvult`.
-/
protected def ult (x y : BitVec n) : Bool := x.toNat < y.toNat
/--
Unsigned less-than-or-equal-to for bitvectors.
Unsigned less-than-or-equal-to for bit vectors.
SMT-LIB name: `bvule`.
-/
protected def ule (x y : BitVec n) : Bool := x.toNat y.toNat
/--
Signed less-than for bitvectors.
Signed less-than for bit vectors.
```lean
BitVec.slt 6#4 7 = true
BitVec.slt 7#4 8 = false
```
SMT-LIB name: `bvslt`.
Examples:
* `BitVec.slt 6#4 7 = true`
* `BitVec.slt 7#4 8 = false`
-/
protected def slt (x y : BitVec n) : Bool := x.toInt < y.toInt
/--
Signed less-than-or-equal-to for bitvectors.
Signed less-than-or-equal-to for bit vectors.
SMT-LIB name: `bvsle`.
-/
@@ -368,13 +354,7 @@ end relations
section cast
/--
If two natural numbers `n` and `m` are equal, then a bitvector of width `n` is also a bitvector of
width `m`.
Using `x.cast eq` should be preferred over `eq ▸ x` because there are special-purpose `simp` lemmas
that can more consistently simplify `BitVec.cast` away.
-/
/-- `cast eq x` embeds `x` into an equal `BitVec` type. -/
@[inline] protected def cast (eq : n = m) (x : BitVec n) : BitVec m := .ofNatLT x.toNat (eq x.isLt)
@[simp] theorem cast_ofNat {n m : Nat} (h : n = m) (x : Nat) :
@@ -388,26 +368,23 @@ that can more consistently simplify `BitVec.cast` away.
@[simp] theorem cast_eq {n : Nat} (h : n = n) (x : BitVec n) : x.cast h = x := rfl
/--
Extracts the bits `start` to `start + len - 1` from a bitvector of size `n` to yield a
new bitvector of size `len`. If `start + len > n`, then the bitvector is zero-extended.
Extraction of bits `start` to `start + len - 1` from a bit vector of size `n` to yield a
new bitvector of size `len`. If `start + len > n`, then the vector will be zero-padded in the
high bits.
-/
def extractLsb' (start len : Nat) (x : BitVec n) : BitVec len := .ofNat _ (x.toNat >>> start)
/--
Extracts the bits from `hi` down to `lo` (both inclusive) from a bitvector, which is implicitly
zero-extended if necessary.
The resulting bitvector has size `hi - lo + 1`.
Extraction of bits `hi` (inclusive) down to `lo` (inclusive) from a bit vector of size `n` to
yield a new bitvector of size `hi - lo + 1`.
SMT-LIB name: `extract`.
-/
def extractLsb (hi lo : Nat) (x : BitVec n) : BitVec (hi - lo + 1) := extractLsb' lo _ x
/--
Increases the width of a bitvector to one that is at least as large by zero-extending it.
This is a constant-time operation because the underlying `Nat` is unmodified; because the new width
is at least as large as the old one, no overflow is possible.
A version of `setWidth` that requires a proof the new width is at least as large,
and is a computational noop.
-/
def setWidth' {n w : Nat} (le : n w) (x : BitVec n) : BitVec w :=
x.toNat#'(by
@@ -417,7 +394,8 @@ def setWidth' {n w : Nat} (le : n ≤ w) (x : BitVec n) : BitVec w :=
@[deprecated setWidth' (since := "2024-09-18"), inherit_doc setWidth'] abbrev zeroExtend' := @setWidth'
/--
Returns `zeroExtend (w+n) x <<< n` without needing to compute `x % 2^(2+n)`.
`shiftLeftZeroExtend x n` returns `zeroExtend (w+n) x <<< n` without
needing to compute `x % 2^(2+n)`.
-/
def shiftLeftZeroExtend (msbs : BitVec w) (m : Nat) : BitVec (w + m) :=
let shiftLeftLt {x : Nat} (p : x < 2^w) (m : Nat) : x <<< m < 2^(w + m) := by
@@ -426,18 +404,10 @@ def shiftLeftZeroExtend (msbs : BitVec w) (m : Nat) : BitVec (w + m) :=
exact (Nat.two_pow_pos m)
(msbs.toNat <<< m)#'(shiftLeftLt msbs.isLt m)
/--
Transforms a bitvector of length `w` into a bitvector of length `v`, padding with `0` as needed.
The specific behavior depends on the relationship between the starting width `w` and the final width
`v`:
* If `v > w`, it is zero-extended; the high bits are padded with zeroes until the bitvector has `v`
bits.
* If `v = w`, the bitvector is returned unchanged.
* If `v < w`, the high bits are truncated.
`BitVec.setWidth`, `BitVec.zeroExtend`, and `BitVec.truncate` are aliases for this operation.
Transform `x` of length `w` into a bitvector of length `v`, by either:
- zero extending, that is, adding zeros in the high bits until it has length `v`, if `v > w`, or
- truncating the high bits, if `v < w`.
SMT-LIB name: `zero_extend`.
-/
@@ -447,17 +417,27 @@ def setWidth (v : Nat) (x : BitVec w) : BitVec v :=
else
.ofNat v x.toNat
@[inherit_doc setWidth]
/--
Transform `x` of length `w` into a bitvector of length `v`, by either:
- zero extending, that is, adding zeros in the high bits until it has length `v`, if `v > w`, or
- truncating the high bits, if `v < w`.
SMT-LIB name: `zero_extend`.
-/
abbrev zeroExtend := @setWidth
@[inherit_doc setWidth]
/--
Transform `x` of length `w` into a bitvector of length `v`, by either:
- zero extending, that is, adding zeros in the high bits until it has length `v`, if `v > w`, or
- truncating the high bits, if `v < w`.
SMT-LIB name: `zero_extend`.
-/
abbrev truncate := @setWidth
/--
Transforms a bitvector of length `w` into a bitvector of length `v`, padding as needed with the most
significant bit's value.
If `x` is an empty bitvector, then the sign is treated as zero.
Sign extend a vector of length `w`, extending with `i` additional copies of the most significant
bit in `x`. If `x` is an empty vector, then the sign is treated as zero.
SMT-LIB name: `sign_extend`.
-/
@@ -468,54 +448,57 @@ end cast
section bitwise
/--
Bitwise and for bitvectors. Usually accessed via the `&&&` operator.
Bitwise AND for bit vectors.
```lean
0b1010#4 &&& 0b0110#4 = 0b0010#4
```
SMT-LIB name: `bvand`.
Example:
* `0b1010#4 &&& 0b0110#4 = 0b0010#4`
-/
protected def and (x y : BitVec n) : BitVec n :=
(x.toNat &&& y.toNat)#'(Nat.and_lt_two_pow x.toNat y.isLt)
instance : AndOp (BitVec w) := .and
/--
Bitwise or for bitvectors. Usually accessed via the `|||` operator.
Bitwise OR for bit vectors.
```lean
0b1010#4 ||| 0b0110#4 = 0b1110#4
```
SMT-LIB name: `bvor`.
Example:
* `0b1010#4 ||| 0b0110#4 = 0b1110#4`
-/
protected def or (x y : BitVec n) : BitVec n :=
(x.toNat ||| y.toNat)#'(Nat.or_lt_two_pow x.isLt y.isLt)
instance : OrOp (BitVec w) := .or
/--
Bitwise xor for bitvectors. Usually accessed via the `^^^` operator.
Bitwise XOR for bit vectors.
```lean
0b1010#4 ^^^ 0b0110#4 = 0b1100#4
```
SMT-LIB name: `bvxor`.
Example:
* `0b1010#4 ^^^ 0b0110#4 = 0b1100#4`
-/
protected def xor (x y : BitVec n) : BitVec n :=
(x.toNat ^^^ y.toNat)#'(Nat.xor_lt_two_pow x.isLt y.isLt)
instance : Xor (BitVec w) := .xor
/--
Bitwise complement for bitvectors. Usually accessed via the `~~~` prefix operator.
Bitwise NOT for bit vectors.
```lean
~~~(0b0101#4) == 0b1010
```
SMT-LIB name: `bvnot`.
Example:
* `~~~(0b0101#4) == 0b1010`
-/
protected def not (x : BitVec n) : BitVec n := allOnes n ^^^ x
instance : Complement (BitVec w) := .not
/--
Shifts a bitvector to the left. The low bits are filled with zeros. As a numeric operation, this is
Left shift for bit vectors. The low bits are filled with zeros. As a numeric operation, this is
equivalent to `x * 2^s`, modulo `2^n`.
SMT-LIB name: `bvshl` except this operator uses a `Nat` shift value.
@@ -524,9 +507,7 @@ protected def shiftLeft (x : BitVec n) (s : Nat) : BitVec n := BitVec.ofNat n (x
instance : HShiftLeft (BitVec w) Nat (BitVec w) := .shiftLeft
/--
Shifts a bitvector to the right. This is a logical right shift - the high bits are filled with
zeros.
(Logical) right shift for bit vectors. The high bits are filled with zeros.
As a numeric operation, this is equivalent to `x / 2^s`, rounding down.
SMT-LIB name: `bvlshr` except this operator uses a `Nat` shift value.
@@ -541,9 +522,8 @@ def ushiftRight (x : BitVec n) (s : Nat) : BitVec n :=
instance : HShiftRight (BitVec w) Nat (BitVec w) := .ushiftRight
/--
Shifts a bitvector to the right. This is an arithmetic right shift - the high bits are filled with
most significant bit's value.
Arithmetic right shift for bit vectors. The high bits are filled with the
most-significant bit.
As a numeric operation, this is equivalent to `x.toInt >>> s`.
SMT-LIB name: `bvashr` except this operator uses a `Nat` shift value.
@@ -554,9 +534,8 @@ instance {n} : HShiftLeft (BitVec m) (BitVec n) (BitVec m) := ⟨fun x y => x <
instance {n} : HShiftRight (BitVec m) (BitVec n) (BitVec m) := fun x y => x >>> y.toNat
/--
Shifts a bitvector to the right. This is an arithmetic right shift - the high bits are filled with
most significant bit's value.
Arithmetic right shift for bit vectors. The high bits are filled with the
most-significant bit.
As a numeric operation, this is equivalent to `a.toInt >>> s.toNat`.
SMT-LIB name: `bvashr`.
@@ -569,15 +548,13 @@ def rotateLeftAux (x : BitVec w) (n : Nat) : BitVec w :=
x <<< n ||| x >>> (w - n)
/--
Rotates the bits in a bitvector to the left.
Rotate left for bit vectors. All the bits of `x` are shifted to higher positions, with the top `n`
bits wrapping around to fill the low bits.
All the bits of `x` are shifted to higher positions, with the top `n` bits wrapping around to fill
the vacated low bits.
SMT-LIB name: `rotate_left`, except this operator uses a `Nat` shift amount.
Example:
* `(0b0011#4).rotateLeft 3 = 0b1001`
```lean
rotateLeft 0b0011#4 3 = 0b1001
```
SMT-LIB name: `rotate_left` except this operator uses a `Nat` shift amount.
-/
def rotateLeft (x : BitVec w) (n : Nat) : BitVec w := rotateLeftAux x (n % w)
@@ -590,26 +567,21 @@ def rotateRightAux (x : BitVec w) (n : Nat) : BitVec w :=
x >>> n ||| x <<< (w - n)
/--
Rotates the bits in a bitvector to the right.
Rotate right for bit vectors. All the bits of `x` are shifted to lower positions, with the
bottom `n` bits wrapping around to fill the high bits.
All the bits of `x` are shifted to lower positions, with the bottom `n` bits wrapping around to fill
the vacated high bits.
SMT-LIB name: `rotate_right`, except this operator uses a `Nat` shift amount.
Example:
* `rotateRight 0b01001#5 1 = 0b10100`
```lean
rotateRight 0b01001#5 1 = 0b10100
```
SMT-LIB name: `rotate_right` except this operator uses a `Nat` shift amount.
-/
def rotateRight (x : BitVec w) (n : Nat) : BitVec w := rotateRightAux x (n % w)
/--
Concatenates two bitvectors using the big-endian convention that the more significant
input is on the left. Usually accessed via the `++` operator.
Concatenation of bitvectors. This uses the "big endian" convention that the more significant
input is on the left, so `0xAB#8 ++ 0xCD#8 = 0xABCD#16`.
SMT-LIB name: `concat`.
Example:
* `0xAB#8 ++ 0xCD#8 = 0xABCD#16`.
-/
def append (msbs : BitVec n) (lsbs : BitVec m) : BitVec (n+m) :=
shiftLeftZeroExtend msbs m ||| setWidth' (Nat.le_add_left m n) lsbs
@@ -617,7 +589,7 @@ def append (msbs : BitVec n) (lsbs : BitVec m) : BitVec (n+m) :=
instance : HAppend (BitVec w) (BitVec v) (BitVec (w + v)) := .append
-- TODO: write this using multiplication
/-- Concatenates `i` copies of `x` into a new vector of length `w * i`. -/
/-- `replicate i x` concatenates `i` copies of `x` into a new vector of length `w*i`. -/
def replicate : (i : Nat) BitVec w BitVec (w*i)
| 0, _ => 0#0
| n+1, x =>
@@ -636,18 +608,14 @@ result of appending a single bit to the front in the naive implementation).
def concat {n} (msbs : BitVec n) (lsb : Bool) : BitVec (n+1) := msbs ++ (ofBool lsb)
/--
Shifts all bits of `x` to the left by `1` and sets the least significant bit to `b`.
This is a non-dependent version of `BitVec.concat` that does not change the total bitwidth.
`x.shiftConcat b` shifts all bits of `x` to the left by `1` and sets the least significant bit to `b`.
It is a non-dependent version of `concat` that does not change the total bitwidth.
-/
def shiftConcat (x : BitVec n) (b : Bool) : BitVec n :=
(x.concat b).truncate n
/--
Prepends a single bit to the front of a bitvector, using big-endian order (see `append`).
The new bit is the most significant bit.
-/
/-- Prepend a single bit to the front of a bitvector, using big endian order (see `append`).
That is, the new bit is the most significant bit. -/
def cons {n} (msb : Bool) (lsbs : BitVec n) : BitVec (n+1) :=
((ofBool msb) ++ lsbs).cast (Nat.add_comm ..)
@@ -660,18 +628,15 @@ theorem ofBool_append (msb : Bool) (lsbs : BitVec w) :
rfl
/--
`twoPow w i` is the bitvector `2^i` if `i < w`, and `0` otherwise. In other words, it is 2 to the
power `i`.
From the bitwise point of view, it has the `i`th bit as `1` and all other bits as `0`.
`twoPow w i` is the bitvector `2^i` if `i < w`, and `0` otherwise.
That is, 2 to the power `i`.
For the bitwise point of view, it has the `i`th bit as `1` and all other bits as `0`.
-/
def twoPow (w : Nat) (i : Nat) : BitVec w := 1#w <<< i
end bitwise
/--
Computes a hash of a bitvector, combining 64-bit words using `mixHash`.
-/
/-- Compute a hash of a bitvector, combining 64-bit words using `mixHash`. -/
def hash (bv : BitVec n) : UInt64 :=
if n 64 then
bv.toFin.val.toUInt64
@@ -699,63 +664,57 @@ section normalization_eqs
@[simp] theorem zero_eq : BitVec.zero n = 0#n := rfl
end normalization_eqs
/-- Converts a list of `Bool`s into a big-endian `BitVec`. -/
/-- Converts a list of `Bool`s to a big-endian `BitVec`. -/
def ofBoolListBE : (bs : List Bool) BitVec bs.length
| [] => 0#0
| b :: bs => cons b (ofBoolListBE bs)
/-- Converts a list of `Bool`s into a little-endian `BitVec`. -/
/-- Converts a list of `Bool`s to a little-endian `BitVec`. -/
def ofBoolListLE : (bs : List Bool) BitVec bs.length
| [] => 0#0
| b :: bs => concat (ofBoolListLE bs) b
/-! ## Overflow -/
/--
Checks whether addition of `x` and `y` results in *unsigned* overflow.
/-- `uaddOverflow x y` returns `true` if addition of `x` and `y` results in *unsigned* overflow.
SMT-LIB name: `bvuaddo`.
SMT-LIB name: `bvuaddo`.
-/
def uaddOverflow {w : Nat} (x y : BitVec w) : Bool := x.toNat + y.toNat 2 ^ w
/--
Checks whether addition of `x` and `y` results in *signed* overflow, treating `x` and `y` as 2's
complement signed bitvectors.
/-- `saddOverflow x y` returns `true` if addition of `x` and `y` results in *signed* overflow,
treating `x` and `y` as 2's complement signed bitvectors.
SMT-LIB name: `bvsaddo`.
SMT-LIB name: `bvsaddo`.
-/
def saddOverflow {w : Nat} (x y : BitVec w) : Bool :=
(x.toInt + y.toInt 2 ^ (w - 1)) || (x.toInt + y.toInt < - 2 ^ (w - 1))
/--
Checks whether subtraction of `x` and `y` results in *unsigned* overflow.
/-- `usubOverflow x y` returns `true` if the subtraction of `x` and `y` results in *unsigned* overflow.
SMT-Lib name: `bvusubo`.
SMT-Lib name: `bvusubo`.
-/
def usubOverflow {w : Nat} (x y : BitVec w) : Bool := x.toNat < y.toNat
/--
Checks whether the subtraction of `x` and `y` results in *signed* overflow, treating `x` and `y` as
2's complement signed bitvectors.
/-- `ssubOverflow x y` returns `true` if the subtraction of `x` and `y` results in *signed* overflow,
treating `x` and `y` as 2's complement signed bitvectors.
SMT-Lib name: `bvssubo`.
SMT-Lib name: `bvssubo`.
-/
def ssubOverflow {w : Nat} (x y : BitVec w) : Bool :=
(x.toInt - y.toInt 2 ^ (w - 1)) || (x.toInt - y.toInt < - 2 ^ (w - 1))
/--
Checks whether the negation of a bitvector results in overflow.
/-- `negOverflow x` returns `true` if the negation of `x` results in overflow.
For a BitVec `x` with width `0 < w`, this only happens if `x = intMin`.
For a bitvector `x` with nonzero width, this only happens if `x = intMin`.
SMT-Lib name: `bvnego`.
SMT-Lib name: `bvnego`.
-/
def negOverflow {w : Nat} (x : BitVec w) : Bool :=
x.toInt == - 2 ^ (w - 1)
/- ### reverse -/
/-- Reverses the bits in a bitvector. -/
/-- Reverse the bits in a bitvector. -/
def reverse : {w : Nat} BitVec w BitVec w
| 0, x => x
| w + 1, x => concat (reverse (x.truncate w)) (x.msb)

View File

@@ -17,9 +17,7 @@ namespace BitVec
section Nat
/--
The bitvector with value `i mod 2^n`.
-/
/-- The `BitVec` with value `i mod 2^n`. -/
@[match_pattern]
protected def ofNat (n : Nat) (i : Nat) : BitVec n where
toFin := Fin.ofNat' (2^n) i
@@ -34,8 +32,8 @@ end Nat
section arithmetic
/--
Adds two bitvectors. This can be interpreted as either signed or unsigned addition modulo `2^n`.
Usually accessed via the `+` operator.
Addition for bit vectors. This can be interpreted as either signed or unsigned addition
modulo `2^n`.
SMT-LIB name: `bvadd`.
-/
@@ -43,9 +41,8 @@ protected def add (x y : BitVec n) : BitVec n := .ofNat n (x.toNat + y.toNat)
instance : Add (BitVec n) := BitVec.add
/--
Subtracts one bitvector from another. This can be interpreted as either signed or unsigned subtraction
modulo `2^n`. Usually accessed via the `-` operator.
Subtraction for bit vectors. This can be interpreted as either signed or unsigned subtraction
modulo `2^n`.
-/
protected def sub (x y : BitVec n) : BitVec n := .ofNat n ((2^n - y.toNat) + x.toNat)
instance : Sub (BitVec n) := BitVec.sub

View File

@@ -9,7 +9,7 @@ import Init.Data.Nat.Mod
import Init.Data.Int.LemmasAux
/-!
# Bit blasting of bitvectors
# Bitblasting of bitvectors
This module provides theorems for showing the equivalence between BitVec operations using
the `Fin 2^n` representation and Boolean vectors. It is still under development, but
@@ -19,21 +19,21 @@ as vectors of bits into proofs about Lean `BitVec` values.
The module is named for the bit-blasting operation in an SMT solver that converts bitvector
expressions into expressions about individual bits in each vector.
### Example: How bit blasting works for multiplication
### Example: How bitblasting works for multiplication
We explain how the lemmas here are used for bit blasting,
We explain how the lemmas here are used for bitblasting,
by using multiplication as a prototypical example.
Other bit blasters for other operations follow the same pattern.
To bit blast a multiplication of the form `x * y`,
Other bitblasters for other operations follow the same pattern.
To bitblast a multiplication of the form `x * y`,
we must unfold the above into a form that the SAT solver understands.
We assume that the solver already knows how to bit blast addition.
We assume that the solver already knows how to bitblast addition.
This is known to `bv_decide`, by exploiting the lemma `add_eq_adc`,
which says that `x + y : BitVec w` equals `(adc x y false).2`,
where `adc` builds an add-carry circuit in terms of the primitive operations
(bitwise and, bitwise or, bitwise xor) that bv_decide already understands.
In this way, we layer bit blasters on top of each other,
by reducing the multiplication bit blaster to an addition operation.
In this way, we layer bitblasters on top of each other,
by reducing the multiplication bitblaster to an addition operation.
The core lemma is given by `getLsbD_mul`:
@@ -65,7 +65,7 @@ mulRec_succ_eq
By repeatedly applying the lemmas `mulRec_zero_eq` and `mulRec_succ_eq`,
one obtains a circuit for multiplication.
Note that this circuit uses `BitVec.add`, `BitVec.getLsbD`, `BitVec.shiftLeft`.
Here, `BitVec.add` and `BitVec.shiftLeft` are (recursively) bit blasted by `bv_decide`,
Here, `BitVec.add` and `BitVec.shiftLeft` are (recursively) bitblasted by `bv_decide`,
using the lemmas `add_eq_adc` and `shiftLeft_eq_shiftLeftRec`,
and `BitVec.getLsbD` is a primitive that `bv_decide` knows how to reduce to SAT.
@@ -88,10 +88,10 @@ computes the correct value for multiplication.
To zoom out, therefore, we follow two steps:
First, we prove bitvector lemmas to unfold a high-level operation (such as multiplication)
into already bit blastable operations (such as addition and left shift).
into already bitblastable operations (such as addition and left shift).
We then use these lemmas to prove the correctness of the circuit that `bv_decide` builds.
We use this workflow to implement bit blasting for all SMT-LIB v2 operations.
We use this workflow to implement bitblasting for all SMT-LIB v2 operations.
## Main results
* `x + y : BitVec w` is `(adc x y false).2`.
@@ -567,19 +567,19 @@ theorem slt_eq_not_ult_of_msb_neq {x y : BitVec w} (h : x.msb ≠ y.msb) :
simp only [BitVec.slt, toInt_eq_msb_cond, Bool.eq_not_of_ne h, ult_eq_msb_of_msb_neq h]
cases y.msb <;> (simp; omega)
theorem slt_eq_ult {x y : BitVec w} :
theorem slt_eq_ult (x y : BitVec w) :
x.slt y = (x.msb != y.msb).xor (x.ult y) := by
by_cases h : x.msb = y.msb
· simp [h, slt_eq_ult_of_msb_eq]
· have h' : x.msb != y.msb := by simp_all
simp [slt_eq_not_ult_of_msb_neq h, h']
theorem slt_eq_not_carry {x y : BitVec w} :
theorem slt_eq_not_carry (x y : BitVec w) :
x.slt y = (x.msb == y.msb).xor (carry w x (~~~y) true) := by
simp only [slt_eq_ult, bne, ult_eq_not_carry]
cases x.msb == y.msb <;> simp
theorem sle_eq_not_slt {x y : BitVec w} : x.sle y = !y.slt x := by
theorem sle_eq_not_slt (x y : BitVec w) : x.sle y = !y.slt x := by
simp only [BitVec.sle, BitVec.slt, decide_not, decide_eq_decide]; omega
theorem zero_sle_eq_not_msb {w : Nat} {x : BitVec w} : BitVec.sle 0#w x = !x.msb := by
@@ -588,14 +588,14 @@ theorem zero_sle_eq_not_msb {w : Nat} {x : BitVec w} : BitVec.sle 0#w x = !x.msb
theorem zero_sle_iff_msb_eq_false {w : Nat} {x : BitVec w} : BitVec.sle 0#w x x.msb = false := by
simp [zero_sle_eq_not_msb]
theorem toNat_toInt_of_sle {w : Nat} {x : BitVec w} (hx : BitVec.sle 0#w x) : x.toInt.toNat = x.toNat :=
theorem toNat_toInt_of_sle {w : Nat} (x : BitVec w) (hx : BitVec.sle 0#w x) : x.toInt.toNat = x.toNat :=
toNat_toInt_of_msb x (zero_sle_iff_msb_eq_false.1 hx)
theorem sle_eq_carry {x y : BitVec w} :
theorem sle_eq_carry (x y : BitVec w) :
x.sle y = !((x.msb == y.msb).xor (carry w y (~~~x) true)) := by
rw [sle_eq_not_slt, slt_eq_not_carry, beq_comm]
theorem neg_slt_zero (h : 0 < w) {x : BitVec w} :
theorem neg_slt_zero (h : 0 < w) (x : BitVec w) :
(-x).slt 0#w = ((x == intMin w) || (0#w).slt x) := by
rw [slt_zero_eq_msb, msb_neg, slt_eq_sle_and_ne, zero_sle_eq_not_msb]
apply Bool.eq_iff_iff.2
@@ -608,23 +608,16 @@ theorem neg_slt_zero (h : 0 < w) {x : BitVec w} :
rintro rfl
simp at hmsb
theorem neg_sle_zero (h : 0 < w) {x : BitVec w} :
theorem neg_sle_zero (h : 0 < w) (x : BitVec w) :
(-x).sle 0#w = (x == intMin w || (0#w).sle x) := by
rw [sle_eq_slt_or_eq, neg_slt_zero h, sle_eq_slt_or_eq]
simp [Bool.beq_eq_decide_eq (-x), Bool.beq_eq_decide_eq _ x, Eq.comm (a := x), Bool.or_assoc]
theorem sle_eq_ule {x y : BitVec w} : x.sle y = (x.msb != y.msb ^^ x.ule y) := by
rw [sle_eq_not_slt, slt_eq_ult, Bool.xor_not, ule_eq_not_ult, bne_comm]
theorem sle_eq_ule_of_msb_eq {x y : BitVec w} (h : x.msb = y.msb) : x.sle y = x.ule y := by
simp [BitVec.sle_eq_ule, h]
/-! ### mul recurrence for bit blasting -/
/-! ### mul recurrence for bitblasting -/
/--
A recurrence that describes multiplication as repeated addition.
This function is useful for bit blasting multiplication.
Is useful for bitblasting multiplication.
-/
def mulRec (x y : BitVec w) (s : Nat) : BitVec w :=
let cur := if y.getLsbD s then (x <<< s) else 0
@@ -724,16 +717,15 @@ theorem getElem_mul {x y : BitVec w} {i : Nat} (h : i < w) :
(x * y)[i] = (mulRec x y w)[i] := by
simp [mulRec_eq_mul_signExtend_setWidth]
/-! ## shiftLeft recurrence for bit blasting -/
/-! ## shiftLeft recurrence for bitblasting -/
/--
Shifts `x` to the left by the first `n` bits of `y`.
`shiftLeftRec x y n` shifts `x` to the left by the first `n` bits of `y`.
The theorem `BitVec.shiftLeft_eq_shiftLeftRec` proves the equivalence of `(x <<< y)` and
`BitVec.shiftLeftRec x y`.
The theorem `shiftLeft_eq_shiftLeftRec` proves the equivalence of `(x <<< y)` and `shiftLeftRec`.
Together with equations `BitVec.shiftLeftRec_zero` and `BitVec.shiftLeftRec_succ`, this allows
`BitVec.shiftLeft` to be unfolded into a circuit for bit blasting.
Together with equations `shiftLeftRec_zero`, `shiftLeftRec_succ`,
this allows us to unfold `shiftLeft` into a circuit for bitblasting.
-/
def shiftLeftRec (x : BitVec w₁) (y : BitVec w₂) (n : Nat) : BitVec w₁ :=
let shiftAmt := (y &&& (twoPow w₂ n))
@@ -787,7 +779,7 @@ theorem shiftLeftRec_eq {x : BitVec w₁} {y : BitVec w₂} {n : Nat} :
/--
Show that `x <<< y` can be written in terms of `shiftLeftRec`.
This can be unfolded in terms of `shiftLeftRec_zero`, `shiftLeftRec_succ` for bit blasting.
This can be unfolded in terms of `shiftLeftRec_zero`, `shiftLeftRec_succ` for bitblasting.
-/
theorem shiftLeft_eq_shiftLeftRec (x : BitVec w₁) (y : BitVec w₂) :
x <<< y = shiftLeftRec x y (w₂ - 1) := by
@@ -795,7 +787,7 @@ theorem shiftLeft_eq_shiftLeftRec (x : BitVec w₁) (y : BitVec w₂) :
· simp [of_length_zero]
· simp [shiftLeftRec_eq]
/-! # udiv/urem recurrence for bit blasting
/-! # udiv/urem recurrence for bitblasting
In order to prove the correctness of the division algorithm on the integers,
one shows that `n.div d = q` and `n.mod d = r` iff `n = d * q + r` and `0 ≤ r < d`.
@@ -1002,9 +994,8 @@ def DivModState.wr_lt_w {qr : DivModState w} (h : qr.Poised args) : qr.wr < w :=
/-! ### Division shift subtractor -/
/--
One round of the division algorithm. It tries to perform a subtract shift.
This should only be called when `r.msb = false`, so it will not overflow.
One round of the division algorithm, that tries to perform a subtract shift.
Note that this should only be called when `r.msb = false`, so we will not overflow.
-/
def divSubtractShift (args : DivModArgs w) (qr : DivModState w) : DivModState w :=
let {n, d} := args
@@ -1094,7 +1085,7 @@ theorem lawful_divSubtractShift (qr : DivModState w) (h : qr.Poised args) :
/-! ### Core division algorithm circuit -/
/-- A recursive definition of division for bit blasting, in terms of a shift-subtraction circuit. -/
/-- A recursive definition of division for bitblasting, in terms of a shift-subtraction circuit. -/
def divRec {w : Nat} (m : Nat) (args : DivModArgs w) (qr : DivModState w) :
DivModState w :=
match m with
@@ -1191,12 +1182,10 @@ theorem getMsbD_udiv (n d : BitVec w) (hd : 0#w < d) (i : Nat) :
/- ### Arithmetic shift right (sshiftRight) recurrence -/
/--
Shifts `x` arithmetically (signed) to the right by the first `n` bits of `y`.
The theorem `BitVec.sshiftRight_eq_sshiftRightRec` proves the equivalence of `(x.sshiftRight y)` and
`BitVec.sshiftRightRec x y`. Together with equations `BitVec.sshiftRightRec_zero`, and
`BitVec.sshiftRightRec_succ`, this allows `BitVec.sshiftRight` to be unfolded into a circuit for
bit blasting.
`sshiftRightRec x y n` shifts `x` arithmetically/signed to the right by the first `n` bits of `y`.
The theorem `sshiftRight_eq_sshiftRightRec` proves the equivalence of `(x.sshiftRight y)` and `sshiftRightRec`.
Together with equations `sshiftRightRec_zero`, `sshiftRightRec_succ`,
this allows us to unfold `sshiftRight` into a circuit for bitblasting.
-/
def sshiftRightRec (x : BitVec w₁) (y : BitVec w₂) (n : Nat) : BitVec w₁ :=
let shiftAmt := (y &&& (twoPow w₂ n))
@@ -1243,7 +1232,7 @@ theorem sshiftRightRec_eq (x : BitVec w₁) (y : BitVec w₂) (n : Nat) :
/--
Show that `x.sshiftRight y` can be written in terms of `sshiftRightRec`.
This can be unfolded in terms of `sshiftRightRec_zero_eq`, `sshiftRightRec_succ_eq` for bit blasting.
This can be unfolded in terms of `sshiftRightRec_zero_eq`, `sshiftRightRec_succ_eq` for bitblasting.
-/
theorem sshiftRight_eq_sshiftRightRec (x : BitVec w₁) (y : BitVec w₂) :
(x.sshiftRight' y).getLsbD i = (sshiftRightRec x y (w₂ - 1)).getLsbD i := by
@@ -1251,16 +1240,16 @@ theorem sshiftRight_eq_sshiftRightRec (x : BitVec w₁) (y : BitVec w₂) :
· simp [of_length_zero]
· simp [sshiftRightRec_eq]
/- ### Logical shift right (ushiftRight) recurrence for bit blasting -/
/- ### Logical shift right (ushiftRight) recurrence for bitblasting -/
/--
Shifts `x` logically to the right by the first `n` bits of `y`.
`ushiftRightRec x y n` shifts `x` logically to the right by the first `n` bits of `y`.
The theorem `BitVec.shiftRight_eq_ushiftRightRec` proves the equivalence
of `(x >>> y)` and `BitVec.ushiftRightRec`.
The theorem `shiftRight_eq_ushiftRightRec` proves the equivalence
of `(x >>> y)` and `ushiftRightRec`.
Together with equations `BitVec.ushiftRightRec_zero` and `BitVec.ushiftRightRec_succ`,
this allows `BitVec.ushiftRight` to be unfolded into a circuit for bit blasting.
Together with equations `ushiftRightRec_zero`, `ushiftRightRec_succ`,
this allows us to unfold `ushiftRight` into a circuit for bitblasting.
-/
def ushiftRightRec (x : BitVec w₁) (y : BitVec w₂) (n : Nat) : BitVec w₁ :=
let shiftAmt := (y &&& (twoPow w₂ n))
@@ -1306,7 +1295,7 @@ theorem ushiftRightRec_eq (x : BitVec w₁) (y : BitVec w₂) (n : Nat) :
/--
Show that `x >>> y` can be written in terms of `ushiftRightRec`.
This can be unfolded in terms of `ushiftRightRec_zero`, `ushiftRightRec_succ` for bit blasting.
This can be unfolded in terms of `ushiftRightRec_zero`, `ushiftRightRec_succ` for bitblasting.
-/
theorem shiftRight_eq_ushiftRightRec (x : BitVec w₁) (y : BitVec w₂) :
x >>> y = ushiftRightRec x y (w₂ - 1) := by
@@ -1653,52 +1642,9 @@ theorem toInt_sdiv (a b : BitVec w) : (a.sdiv b).toInt = (a.toInt.tdiv b.toInt).
conv => lhs; rw [(by omega: w = (w - 1) + 1)]
simp [Nat.pow_succ, Int.natCast_pow, Int.mul_comm]
· rw [ toInt_bmod_cancel]
rw [BitVec.toInt_sdiv_of_ne_or_ne _ _ (by simpa only [Decidable.not_and_iff_not_or_not] using h)]
rw [BitVec.toInt_sdiv_of_ne_or_ne _ _ (by simpa only [Classical.not_and_iff_not_or_not] using h)]
theorem msb_umod_eq_false_of_left {x : BitVec w} (hx : x.msb = false) (y : BitVec w) : (x % y).msb = false := by
rw [msb_eq_false_iff_two_mul_lt] at hx
rw [toNat_umod]
refine Nat.lt_of_le_of_lt ?_ hx
rw [Nat.mul_le_mul_left_iff (by decide)]
exact Nat.mod_le _ _
theorem msb_umod_of_le_of_ne_zero_of_le {x y : BitVec w}
(hx : x intMin w) (hy : y 0#w) (hy' : y intMin w) : (x % y).msb = false := by
simp only [msb_umod, Bool.and_eq_false_imp, Bool.or_eq_false_iff, decide_eq_false_iff_not,
BitVec.not_lt, beq_eq_false_iff_ne, ne_eq, hy, not_false_eq_true, _root_.and_true]
intro h
rw [ intMin_le_iff_msb_eq_true (length_pos_of_ne hy)] at h
rwa [BitVec.le_antisymm hx h]
@[simp]
theorem toInt_srem (x y : BitVec w) : (x.srem y).toInt = x.toInt.tmod y.toInt := by
rw [srem_eq]
by_cases hyz : y = 0#w
· simp only [hyz, ofNat_eq_ofNat, msb_zero, umod_zero, neg_zero, neg_neg, toInt_zero, Int.tmod_zero]
cases x.msb <;> rfl
cases h : x.msb
· cases h' : y.msb
· dsimp only
rw [toInt_eq_toNat_of_msb (msb_umod_eq_false_of_left h y), toNat_umod]
rw [toInt_eq_toNat_of_msb h, toInt_eq_toNat_of_msb h', Int.ofNat_tmod]
· dsimp only
rw [toInt_eq_toNat_of_msb (msb_umod_eq_false_of_left h _), toNat_umod]
rw [toInt_eq_toNat_of_msb h, toInt_eq_neg_toNat_neg_of_msb_true h']
rw [Int.tmod_neg, Int.ofNat_tmod]
· cases h' : y.msb
· dsimp only
rw [toInt_eq_neg_toNat_neg_of_msb_true h, toInt_eq_toNat_of_msb h', Int.neg_tmod]
rw [ Int.ofNat_tmod, toNat_umod, toInt_neg_eq_of_msb ?msb, toInt_eq_toNat_of_msb ?msb]
rw [BitVec.msb_umod_of_le_of_ne_zero_of_le (neg_le_intMin_of_msb_eq_true h) hyz]
exact le_intMin_of_msb_eq_false h'
· dsimp only
rw [toInt_eq_neg_toNat_neg_of_msb_true h, toInt_eq_neg_toNat_neg_of_msb_true h', Int.neg_tmod, Int.tmod_neg]
rw [ Int.ofNat_tmod, toNat_umod, toInt_neg_eq_of_msb ?msb', toInt_eq_toNat_of_msb ?msb']
rw [BitVec.msb_umod_of_le_of_ne_zero_of_le (neg_le_intMin_of_msb_eq_true h)
((not_congr neg_eq_zero_iff).mpr hyz)]
exact neg_le_intMin_of_msb_eq_true h'
/-! ### Lemmas that use bit blasting circuits -/
/-! ### Lemmas that use Bitblasting circuits -/
theorem add_sub_comm {x y : BitVec w} : x + y - z = x - z + y := by
apply eq_of_toNat_eq
@@ -1752,22 +1698,4 @@ theorem extractLsb'_mul {w len} {x y : BitVec w} (hlen : len ≤ w) :
(x * y).extractLsb' 0 len = (x.extractLsb' 0 len) * (y.extractLsb' 0 len) := by
simp [ setWidth_eq_extractLsb' hlen, setWidth_mul _ _ hlen]
/-- Adding bitvectors that are zero in complementary positions equals concatenation. -/
theorem append_add_append_eq_append {v w : Nat} {x : BitVec v} {y : BitVec w} :
(x ++ 0#w) + (0#v ++ y) = x ++ y := by
rw [add_eq_or_of_and_eq_zero] <;> ext i <;> simp
/-- Heuristically, `y <<< x` is much larger than `x`,
and hence low bits of `y <<< x`. Thus, `x + (y <<< x) = x ||| (y <<< x).` -/
theorem add_shifLeft_eq_or_shiftLeft {x y : BitVec w} :
x + (y <<< x) = x ||| (y <<< x) := by
rw [add_eq_or_of_and_eq_zero]
ext i hi
simp only [shiftLeft_eq', getElem_and, getElem_shiftLeft, getElem_zero, and_eq_false_imp,
not_eq_eq_eq_not, Bool.not_true, decide_eq_false_iff_not, Nat.not_lt]
intros hxi hxval
have : 2^i x.toNat := two_pow_le_toNat_of_getElem_eq_true hi hxi
have : i < 2^i := by exact Nat.lt_two_pow_self
omega
end BitVec

View File

@@ -13,18 +13,15 @@ set_option linter.missingDocs true
namespace BitVec
/--
Constructs a bitvector by iteratively computing a state for each bit using the function `f`,
starting with the initial state `s`. At each step, the prior state and the current bit index are
passed to `f`, and it produces a bit along with the next state value. These bits are assembled into
the final bitvector.
iunfoldr is an iterative operation that applies a function `f` repeatedly.
It produces a sequence of state values `[s_0, s_1 .. s_w]` and a bitvector `v` where `f i s_i =
(s_{i+1}, b_i)` and `b_i` is bit `i`th least-significant bit in `v` (e.g., `getLsb v i = b_i`).
It produces a sequence of state values `[s_0, s_1 .. s_w]` and a bitvector
`v` where `f i s_i = (s_{i+1}, b_i)` and `b_i` is bit `i`th least-significant bit
in `v` (e.g., `getLsb v i = b_i`).
The theorem `iunfoldr_replace` allows uses of `BitVec.iunfoldr` to be replaced wiht declarative
specifications that are easier to reason about.
Theorems involving `iunfoldr` can be eliminated using `iunfoldr_replace` below.
-/
def iunfoldr (f : Fin w α α × Bool) (s : α) : α × BitVec w :=
def iunfoldr (f : Fin w -> α α × Bool) (s : α) : α × BitVec w :=
Fin.hIterate (fun i => α × BitVec i) (s, nil) fun i q =>
(fun p => p.fst, cons p.snd q.snd) (f i q.fst)
@@ -99,12 +96,7 @@ theorem iunfoldr_getLsbD {f : Fin w → αα × Bool} (state : Nat → α)
exact (iunfoldr_getLsbD' state ind).1 i
/--
Given a function `state` that provides the correct state for every potential iteration count and a
function that computes these states from the correct initial state, the result of applying
`BitVec.iunfoldr f` to the initial state is the state corresponding to the bitvector's width paired
with the bitvector that consists of each computed bit.
This theorem can be used to prove properties of functions that are defined using `BitVec.iunfoldr`.
Correctness theorem for `iunfoldr`.
-/
theorem iunfoldr_replace
{f : Fin w α α × Bool} (state : Nat α) (value : BitVec w) (a : α)

View File

@@ -136,12 +136,6 @@ protected theorem toNat_lt_twoPow_of_le (h : m ≤ n) {x : BitVec m} :
theorem testBit_toNat (x : BitVec w) : x.toNat.testBit i = x.getLsbD i := rfl
theorem two_pow_le_toNat_of_getElem_eq_true {i : Nat} {x : BitVec w}
(hi : i < w) (hx : x[i] = true) : 2^i x.toNat := by
apply Nat.testBit_implies_ge
rw [ getElem_eq_testBit_toNat x i hi]
exact hx
theorem getMsb'_eq_getLsb' (x : BitVec w) (i : Fin w) :
x.getMsb' i = x.getLsb' w - 1 - i, by omega := by
simp only [getMsb', getLsb']
@@ -269,11 +263,6 @@ theorem getMsbD_of_zero_length (h : w = 0) (x : BitVec w) : x.getMsbD i = false
subst h; simp [getMsbD_zero_length]
theorem msb_of_zero_length (h : w = 0) (x : BitVec w) : x.msb = false := by
subst h; simp [msb_zero_length]
theorem eq_of_zero_length (h : w = 0) {x y : BitVec w} : x = y := by
subst h; rw [eq_nil x, eq_nil y]
theorem length_pos_of_ne {x y : BitVec w} (h : x y) : 0 < w :=
Nat.zero_lt_of_ne_zero (mt (fun h => eq_of_zero_length h) h)
theorem ofFin_ofNat (n : Nat) :
ofFin (no_index (OfNat.ofNat n : Fin (2^w))) = OfNat.ofNat n := by
@@ -767,36 +756,12 @@ theorem slt_zero_iff_msb_cond {x : BitVec w} : x.slt 0#w ↔ x.msb = true := by
theorem slt_zero_eq_msb {w : Nat} {x : BitVec w} : x.slt 0#w = x.msb := by
rw [Bool.eq_iff_iff, BitVec.slt_zero_iff_msb_cond]
theorem sle_eq_decide {x y : BitVec w} : x.sle y = decide (x.toInt y.toInt) := rfl
theorem slt_eq_decide {x y : BitVec w} : x.slt y = decide (x.toInt < y.toInt) := rfl
theorem ule_eq_decide {x y : BitVec w} : x.ule y = decide (x.toNat y.toNat) := rfl
theorem ult_eq_decide {x y : BitVec w} : x.ult y = decide (x.toNat < y.toNat) := rfl
theorem ule_eq_decide_le {x y : BitVec w} : x.ule y = decide (x y) := rfl
theorem ult_eq_decide_lt {x y : BitVec w} : x.ult y = decide (x < y) := rfl
theorem ule_iff_le {x y : BitVec w} : x.ule y x y :=
decide_eq_true_iff
theorem ult_iff_lt {x y : BitVec w} : x.ult y x < y :=
decide_eq_true_iff
theorem sle_iff_toInt_le {w : Nat} {x y : BitVec w} : x.sle y x.toInt y.toInt :=
decide_eq_true_iff
theorem slt_iff_toInt_lt {w : Nat} {x y : BitVec w} : x.slt y x.toInt < y.toInt :=
decide_eq_true_iff
theorem ule_iff_toNat_le {x y : BitVec w} : x.ule y x.toNat y.toNat :=
decide_eq_true_iff
theorem ult_iff_toNat_lt {x y : BitVec w} : x.ult y x.toNat < y.toNat :=
decide_eq_true_iff
theorem sle_eq_slt_or_eq {x y : BitVec w} : x.sle y = (x.slt y || x == y) := by
apply Bool.eq_iff_iff.2
simp only [BitVec.sle, decide_eq_true_eq, BitVec.slt, Bool.or_eq_true, beq_iff_eq, toInt_inj]
@@ -2539,6 +2504,7 @@ theorem toInt_signExtend_eq_toInt_bmod_of_le (x : BitVec w) (h : v ≤ w) :
(x.signExtend v).toInt = x.toInt.bmod (2 ^ v) := by
rw [BitVec.toInt_signExtend, Nat.min_eq_left h]
theorem toFin_signExtend_of_le {x : BitVec w} (hv : v w):
(x.signExtend v).toFin = Fin.ofNat' (2 ^ v) x.toNat := by
simp [signExtend_eq_setWidth_of_le _ hv]
@@ -3345,14 +3311,6 @@ theorem sub_toAdd {n} (x y : BitVec n) : x - y = x + - y := by
simp only [toNat_sub, toNat_add, toNat_neg, Nat.add_mod_mod]
rw [Nat.add_comm]
theorem add_left_neg (x : BitVec w) : -x + x = 0#w := by
apply toInt_inj.mp
simp [toInt_neg, Int.add_left_neg]
theorem add_right_neg (x : BitVec w) : x + -x = 0#w := by
rw [BitVec.add_comm]
exact add_left_neg x
@[simp] theorem neg_zero (n:Nat) : -BitVec.ofNat n 0 = BitVec.ofNat n 0 := by apply eq_of_toNat_eq ; simp
theorem add_sub_cancel (x y : BitVec w) : x + y - y = x := by
@@ -3718,11 +3676,6 @@ theorem not_lt_iff_le {x y : BitVec w} : (¬ x < y) ↔ y ≤ x := by
constructor <;>
(intro h; simp only [lt_def, Nat.not_lt, le_def] at h ; omega)
protected theorem le_of_lt {x y : BitVec w} (h : x < y) : x y := Nat.le_of_lt h
protected theorem le_of_eq {x y : BitVec w} (h : x = y) : x y :=
Nat.le_of_eq (toNat_eq.mp h)
@[simp]
theorem not_lt_zero {x : BitVec w} : ¬x < 0#w := of_decide_eq_false rfl
@@ -3759,18 +3712,9 @@ theorem allOnes_le_iff {x : BitVec w} : allOnes w ≤ x ↔ x = allOnes w := by
@[simp]
theorem lt_allOnes_iff {x : BitVec w} : x < allOnes w x allOnes w := by
have := not_congr (@allOnes_le_iff w x)
rw [BitVec.not_le] at this
exact this
theorem le_of_zero_length (h : w = 0) {x y : BitVec w} : x y := by
exact BitVec.le_of_eq (eq_of_zero_length h)
theorem pos_of_msb {x : BitVec w} (hx : x.msb = true) : 0#w < x := by
apply Decidable.by_contra
intro h
rw [BitVec.not_lt, le_zero_iff] at h
simp [h] at hx
have := not_congr (@allOnes_le_iff w x)
rw [BitVec.not_le] at this
exact this
/-! ### udiv -/
@@ -4739,9 +4683,6 @@ The RHS is zero in case `w = 0` which is modeled by wrapping the expression in `
theorem toNat_intMin : (intMin w).toNat = 2 ^ (w - 1) % 2 ^ w := by
simp [intMin]
theorem toNat_intMin_of_pos (hw : 0 < w) : (intMin w).toNat = 2 ^ (w - 1) := by
rw [toNat_intMin, Nat.mod_eq_of_lt (Nat.pow_lt_pow_of_lt (by decide) (Nat.sub_one_lt_of_lt hw))]
@[simp]
theorem intMin_eq_zero_iff {w : Nat} : intMin w = 0#w w = 0 := by
by_cases h : w = 0
@@ -4838,37 +4779,6 @@ theorem ne_intMin_of_msb_eq_false (h : 0 < w) {n : BitVec w} (hn : n.msb = false
simp only [msb_intMin, decide_eq_false_iff_not, Nat.not_lt, Nat.le_zero_eq] at hn
omega
theorem toInt_neg_eq_of_msb {x : BitVec w} (h : x.msb = false) : (-x).toInt = -x.toInt := by
match w with
| 0 => rw [of_length_zero (x := x), neg_zero, toInt_zero, Int.neg_zero]
| w' + 1 => exact toInt_neg_of_ne_intMin (ne_intMin_of_msb_eq_false (Nat.zero_lt_succ _) h)
theorem lt_intMin_iff_msb_eq_false {x : BitVec w} (hw : 0 < w) :
x < intMin w x.msb = false := by
simp only [msb_eq_false_iff_two_mul_lt, toNat_intMin_of_pos hw, lt_def]
rw [ Nat.mul_lt_mul_left (by decide : 0 < 2), Nat.pow_add_one', Nat.sub_one_add_one_eq_of_pos hw]
theorem intMin_le_iff_msb_eq_true {x : BitVec w} (hw : 0 < w) :
intMin w x x.msb = true := by
rw [ Decidable.not_iff_not, BitVec.not_le, Bool.not_eq_true]
exact lt_intMin_iff_msb_eq_false hw
theorem le_intMin_of_msb_eq_false {x : BitVec w} (hx : x.msb = false) : x intMin w := by
match w with
| 0 => exact le_of_zero_length rfl
| w' + 1 =>
apply BitVec.le_of_lt
exact (lt_intMin_iff_msb_eq_false (Nat.zero_lt_succ _)).mpr hx
theorem neg_le_intMin_of_msb_eq_true {x : BitVec w} (hx : x.msb = true) : -x intMin w := by
match w with
| 0 => exact le_of_zero_length rfl
| w' + 1 =>
rw [le_def, toNat_neg_of_pos (pos_of_msb hx), toNat_intMin_of_pos (Nat.zero_lt_succ _)]
simp only [Nat.succ_eq_add_one, Nat.add_one_sub_one, Nat.pow_add_one]
rw [msb_eq_true_iff_two_mul_ge, Nat.pow_add_one] at hx
omega
/-! ### intMax -/
/-- The bitvector of width `w` that has the largest value when interpreted as an integer. -/

View File

@@ -1,74 +0,0 @@
/-
Copyright (c) 2025 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Jeremy Avigad, Mario Carneiro, Paul Reichert
-/
prelude
import Init.Data.Ord
import Init.Data.Int.Order
/-! # Basic lemmas about comparing integers
This file introduces some basic lemmas about `compare` as applied to integers.
Import `Std.Classes.Ord` in order to obtain the `TransOrd` and `LawfulEqOrd` instances for `Int`.
-/
namespace Int
protected theorem lt_or_eq_of_le {n m : Int} (h : n m) : n < m n = m := by
omega
protected theorem le_iff_lt_or_eq {n m : Int} : n m n < m n = m :=
Int.lt_or_eq_of_le, fun | .inl h => Int.le_of_lt h | .inr rfl => Int.le_refl _
theorem compare_eq_ite_lt (a b : Int) :
compare a b = if a < b then .lt else if b < a then .gt else .eq := by
simp only [compare, compareOfLessAndEq]
split
· rfl
· next h =>
match Int.lt_or_eq_of_le (Int.not_lt.1 h) with
| .inl h => simp [h, Int.ne_of_gt h]
| .inr rfl => simp
theorem compare_eq_ite_le (a b : Int) :
compare a b = if a b then if b a then .eq else .lt else .gt := by
rw [compare_eq_ite_lt]
split
· next hlt => simp [Int.le_of_lt hlt, Int.not_le.2 hlt]
· next hge =>
split
· next hgt => simp [Int.le_of_lt hgt, Int.not_le.2 hgt]
· next hle => simp [Int.not_lt.1 hge, Int.not_lt.1 hle]
protected theorem compare_swap (a b : Int) : (compare a b).swap = compare b a := by
simp only [compare_eq_ite_le]; (repeat' split) <;> try rfl
next h1 h2 => cases h1 (Int.le_of_not_le h2)
protected theorem compare_eq_eq {a b : Int} : compare a b = .eq a = b := by
rw [compare_eq_ite_lt]; (repeat' split) <;> simp [Int.ne_of_lt, Int.ne_of_gt, *]
next hlt hgt => exact Int.le_antisymm (Int.not_lt.1 hgt) (Int.not_lt.1 hlt)
protected theorem compare_eq_lt {a b : Int} : compare a b = .lt a < b := by
rw [compare_eq_ite_lt]; (repeat' split) <;> simp [*]
protected theorem compare_eq_gt {a b : Int} : compare a b = .gt b < a := by
rw [compare_eq_ite_lt]; (repeat' split) <;> simp [Int.le_of_lt, *]
protected theorem compare_ne_gt {a b : Int} : compare a b .gt a b := by
rw [compare_eq_ite_le]; (repeat' split) <;> simp [*]
protected theorem compare_ne_lt {a b : Int} : compare a b .lt b a := by
rw [compare_eq_ite_le]; (repeat' split) <;> simp [Int.le_of_not_le, *]
protected theorem isLE_compare {a b : Int} :
(compare a b).isLE a b := by
simp only [Int.compare_eq_ite_le]
repeat' split <;> simp_all
protected theorem isGE_compare {a b : Int} :
(compare a b).isGE b a := by
rw [ Int.compare_swap, Ordering.isGE_swap]
exact Int.isLE_compare
end Int

View File

@@ -76,11 +76,6 @@ theorem neg_lt_self_iff {n : Int} : -n < n ↔ 0 < n := by
theorem pos_iff_toNat_pos {n : Int} : 0 < n 0 < n.toNat := by
omega
theorem ofNat_toNat_eq_self {a : Int} : a.toNat = a 0 a := by omega
theorem eq_ofNat_toNat {a : Int} : a = a.toNat 0 a := by omega
theorem toNat_le_toNat {n m : Int} (h : n m) : n.toNat m.toNat := by omega
theorem toNat_lt_toNat {n m : Int} (hn : 0 < m) : n.toNat < m.toNat n < m := by omega
/-! ### natAbs -/
theorem eq_zero_of_dvd_of_natAbs_lt_natAbs {d n : Int} (h : d n) (h₁ : n.natAbs < d.natAbs) :

View File

@@ -1796,29 +1796,6 @@ theorem of_not_dvd (a b : Int) : a != 0 → ¬ (a b) → b % a > 0 := by
simp [h₁] at h₂
assumption
def le_of_le_cert (p q : Poly) (k : Nat) : Bool :=
q == p.addConst (- k)
theorem le_of_le (ctx : Context) (p q : Poly) (k : Nat)
: le_of_le_cert p q k p.denote' ctx 0 q.denote' ctx 0 := by
simp [le_of_le_cert]; intro; subst q; simp
intro h
simp [Lean.Omega.Int.add_le_zero_iff_le_neg']
exact Int.le_trans h (Int.ofNat_zero_le _)
def not_le_of_le_cert (p q : Poly) (k : Nat) : Bool :=
q == (p.mul (-1)).addConst (1 + k)
theorem not_le_of_le (ctx : Context) (p q : Poly) (k : Nat)
: not_le_of_le_cert p q k p.denote' ctx 0 ¬ q.denote' ctx 0 := by
simp [not_le_of_le_cert]; intro; subst q
intro h
apply Int.pos_of_neg_neg
apply Int.lt_of_add_one_le
simp [Int.neg_add, Int.neg_sub]
rw [ Int.add_assoc, Int.add_assoc, Int.add_neg_cancel_right, Lean.Omega.Int.add_le_zero_iff_le_neg']
simp; exact Int.le_trans h (Int.ofNat_zero_le _)
end Int.Linear
theorem Int.not_le_eq (a b : Int) : (¬a b) = (b + 1 a) := by

View File

@@ -921,8 +921,6 @@ theorem head?_eq_getElem? : ∀ {l : List α}, l.head? = l[0]?
| [] => rfl
| a :: l => by simp
theorem head_singleton {a : α} : head [a] (by simp) = a := by simp
theorem head_eq_getElem {l : List α} (h : l []) : head l h = l[0]'(length_pos_iff.mpr h) := by
cases l with
| nil => simp at h
@@ -1055,9 +1053,6 @@ theorem getLast?_tail {l : List α} : (tail l).getLast? = if l.length = 1 then n
| nil => simp [List.map]
| cons _ as ih => simp [List.map, ih]
@[simp] theorem isEmpty_map {l : List α} {f : α β} : (l.map f).isEmpty = l.isEmpty := by
cases l <;> simp
@[simp] theorem getElem?_map {f : α β} : {l : List α} {i : Nat}, (map f l)[i]? = Option.map f l[i]?
| [], _ => rfl
| _ :: _, 0 => by simp
@@ -3364,7 +3359,7 @@ theorem all_eq_not_any_not {l : List α} {p : α → Bool} : l.all p = !l.any (!
split <;> simp_all
@[simp] theorem all_filter {l : List α} {p q : α Bool} :
(filter p l).all q = l.all fun a => !(p a) || q a := by
(filter p l).all q = l.all fun a => p a q a := by
induction l with
| nil => rfl
| cons h t ih =>

View File

@@ -777,34 +777,31 @@ protected theorem pow_succ (n m : Nat) : n^(succ m) = n^m * n :=
protected theorem pow_add_one (n m : Nat) : n^(m + 1) = n^m * n :=
rfl
@[simp] protected theorem pow_zero (n : Nat) : n^0 = 1 := rfl
protected theorem pow_zero (n : Nat) : n^0 = 1 := rfl
@[simp] protected theorem pow_one (a : Nat) : a ^ 1 = a := by
simp [Nat.pow_succ]
protected theorem pow_le_pow_left {n m : Nat} (h : n m) : (i : Nat), n^i m^i
theorem pow_le_pow_left {n m : Nat} (h : n m) : (i : Nat), n^i m^i
| 0 => Nat.le_refl _
| succ i => Nat.mul_le_mul (Nat.pow_le_pow_left h i) h
| succ i => Nat.mul_le_mul (pow_le_pow_left h i) h
protected theorem pow_le_pow_right {n : Nat} (hx : n > 0) {i : Nat} : {j}, i j n^i n^j
theorem pow_le_pow_right {n : Nat} (hx : n > 0) {i : Nat} : {j}, i j n^i n^j
| 0, h =>
have : i = 0 := eq_zero_of_le_zero h
this.symm Nat.le_refl _
| succ j, h =>
match le_or_eq_of_le_succ h with
| Or.inl h => show n^i n^j * n from
have : n^i * 1 n^j * n := Nat.mul_le_mul (Nat.pow_le_pow_right hx h) hx
have : n^i * 1 n^j * n := Nat.mul_le_mul (pow_le_pow_right hx h) hx
Nat.mul_one (n^i) this
| Or.inr h =>
h.symm Nat.le_refl _
set_option linter.missingDocs false in
@[deprecated Nat.pow_le_pow_left (since := "2025-02-17")]
abbrev pow_le_pow_of_le_left := @Nat.pow_le_pow_left
abbrev pow_le_pow_of_le_left := @pow_le_pow_left
set_option linter.missingDocs false in
@[deprecated Nat.pow_le_pow_right (since := "2025-02-17")]
abbrev pow_le_pow_of_le_right := @Nat.pow_le_pow_right
abbrev pow_le_pow_of_le_right := @pow_le_pow_right
protected theorem pow_pos (h : 0 < a) : 0 < a^n :=
match n with
@@ -825,33 +822,6 @@ protected theorem two_pow_pos (w : Nat) : 0 < 2^w := Nat.pow_pos (by decide)
instance {n m : Nat} [NeZero n] : NeZero (n^m) :=
Nat.ne_zero_iff_zero_lt.mpr (Nat.pow_pos (pos_of_neZero _))
protected theorem mul_pow (a b n : Nat) : (a * b) ^ n = a ^ n * b ^ n := by
induction n with
| zero => simp [Nat.pow_zero]
| succ n ih =>
rw [Nat.pow_succ, ih, Nat.pow_succ, Nat.pow_succ, Nat.mul_assoc, Nat.mul_assoc]
congr 1
rw [Nat.mul_assoc, Nat.mul_assoc, Nat.mul_comm _ a]
protected theorem pow_lt_pow_left {a b n : Nat} (hab : a < b) (h : n 0) : a ^ n < b ^ n := by
cases n with
| zero => simp at h
| succ n =>
clear h
induction n with
| zero => simpa
| succ n ih =>
rw [Nat.pow_succ a, Nat.pow_succ b]
exact Nat.lt_of_le_of_lt (Nat.mul_le_mul_left _ (Nat.le_of_lt hab))
(Nat.mul_lt_mul_of_pos_right ih (Nat.lt_of_le_of_lt (Nat.zero_le _) hab))
protected theorem pow_left_inj {a b n : Nat} (hn : n 0) : a ^ n = b ^ n a = b := by
refine fun h => ?_, (· rfl)
match Nat.lt_trichotomy a b with
| Or.inl hab => exact False.elim (absurd h (ne_of_lt (Nat.pow_lt_pow_left hab hn)))
| Or.inr (Or.inl hab) => exact hab
| Or.inr (Or.inr hab) => exact False.elim (absurd h (Nat.ne_of_lt' (Nat.pow_lt_pow_left hab hn)))
/-! # min/max -/
/--
@@ -1200,15 +1170,9 @@ protected theorem mul_sub_right_distrib (n m k : Nat) : (n - m) * k = n * k - m
| zero => simp
| succ m ih => rw [Nat.sub_succ, Nat.pred_mul, ih, succ_mul, Nat.sub_sub]; done
protected theorem sub_mul (n m k : Nat) : (n - m) * k = n * k - m * k :=
Nat.mul_sub_right_distrib n m k
protected theorem mul_sub_left_distrib (n m k : Nat) : n * (m - k) = n * m - n * k := by
rw [Nat.mul_comm, Nat.mul_sub_right_distrib, Nat.mul_comm m n, Nat.mul_comm n k]
protected theorem mul_sub (n m k : Nat) : n * (m - k) = n * m - n * k :=
Nat.mul_sub_left_distrib n m k
/-! # Helper normalization theorems -/
theorem not_le_eq (a b : Nat) : (¬ (a b)) = (b + 1 a) :=

View File

@@ -501,7 +501,7 @@ theorem and_lt_two_pow (x : Nat) {y n : Nat} (right : y < 2^n) : (x &&& y) < 2^n
have yf : testBit y i = false := by
apply Nat.testBit_lt_two_pow
apply Nat.lt_of_lt_of_le right
exact Nat.pow_le_pow_right Nat.zero_lt_two i_ge_n
exact pow_le_pow_right Nat.zero_lt_two i_ge_n
simp [testBit_and, yf]
@[simp] theorem and_two_pow_sub_one_eq_mod (x n : Nat) : x &&& 2^n - 1 = x % 2^n := by
@@ -828,9 +828,3 @@ theorem and_le_left {n m : Nat} : n &&& m ≤ n :=
theorem and_le_right {n m : Nat} : n &&& m m :=
le_of_testBit (by simp)
theorem left_le_or {n m : Nat} : n n ||| m :=
le_of_testBit (by simpa using fun i => Or.inl)
theorem right_le_or {n m : Nat} : m n ||| m :=
le_of_testBit (by simpa using fun i => Or.inr)

View File

@@ -4,18 +4,17 @@ Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Jeremy Avigad, Mario Carneiro
-/
prelude
import Init.Classical
import Init.Data.Ord
/-! # Basic lemmas about comparing natural numbers
This file introduce some basic lemmas about compare as applied to natural
numbers.
Import `Std.Classes.Ord` in order to obtain the `TransOrd` and `LawfulEqOrd` instances for `Nat`.
-/
namespace Nat
theorem compare_eq_ite_lt (a b : Nat) :
theorem compare_def_lt (a b : Nat) :
compare a b = if a < b then .lt else if b < a then .gt else .eq := by
simp only [compare, compareOfLessAndEq]
split
@@ -25,12 +24,9 @@ theorem compare_eq_ite_lt (a b : Nat) :
| .inl h => simp [h, Nat.ne_of_gt h]
| .inr rfl => simp
@[deprecated compare_eq_ite_lt (since := "2025-03_28")]
def compare_def_lt := compare_eq_ite_lt
theorem compare_eq_ite_le (a b : Nat) :
theorem compare_def_le (a b : Nat) :
compare a b = if a b then if b a then .eq else .lt else .gt := by
rw [compare_eq_ite_lt]
rw [compare_def_lt]
split
· next hlt => simp [Nat.le_of_lt hlt, Nat.not_le.2 hlt]
· next hge =>
@@ -38,37 +34,24 @@ theorem compare_eq_ite_le (a b : Nat) :
· next hgt => simp [Nat.le_of_lt hgt, Nat.not_le.2 hgt]
· next hle => simp [Nat.not_lt.1 hge, Nat.not_lt.1 hle]
@[deprecated compare_eq_ite_le (since := "2025-03_28")]
def compare_def_le := compare_eq_ite_le
protected theorem compare_swap (a b : Nat) : (compare a b).swap = compare b a := by
simp only [compare_eq_ite_le]; (repeat' split) <;> try rfl
simp only [compare_def_le]; (repeat' split) <;> try rfl
next h1 h2 => cases h1 (Nat.le_of_not_le h2)
protected theorem compare_eq_eq {a b : Nat} : compare a b = .eq a = b := by
rw [compare_eq_ite_lt]; (repeat' split) <;> simp [Nat.ne_of_lt, Nat.ne_of_gt, *]
rw [compare_def_lt]; (repeat' split) <;> simp [Nat.ne_of_lt, Nat.ne_of_gt, *]
next hlt hgt => exact Nat.le_antisymm (Nat.not_lt.1 hgt) (Nat.not_lt.1 hlt)
protected theorem compare_eq_lt {a b : Nat} : compare a b = .lt a < b := by
rw [compare_eq_ite_lt]; (repeat' split) <;> simp [*]
rw [compare_def_lt]; (repeat' split) <;> simp [*]
protected theorem compare_eq_gt {a b : Nat} : compare a b = .gt b < a := by
rw [compare_eq_ite_lt]; (repeat' split) <;> simp [Nat.le_of_lt, *]
rw [compare_def_lt]; (repeat' split) <;> simp [Nat.le_of_lt, *]
protected theorem compare_ne_gt {a b : Nat} : compare a b .gt a b := by
rw [compare_eq_ite_le]; (repeat' split) <;> simp [*]
rw [compare_def_le]; (repeat' split) <;> simp [*]
protected theorem compare_ne_lt {a b : Nat} : compare a b .lt b a := by
rw [compare_eq_ite_le]; (repeat' split) <;> simp [Nat.le_of_not_le, *]
protected theorem isLE_compare {a b : Nat} :
(compare a b).isLE a b := by
simp only [Nat.compare_eq_ite_le]
repeat' split <;> simp_all
protected theorem isGE_compare {a b : Nat} :
(compare a b).isGE b a := by
rw [ Nat.compare_swap, Ordering.isGE_swap]
exact Nat.isLE_compare
rw [compare_def_le]; (repeat' split) <;> simp [Nat.le_of_not_le, *]
end Nat

View File

@@ -21,12 +21,6 @@ protected theorem dvd_trans {a b c : Nat} (h₁ : a b) (h₂ : b c) : a
| d, (h₃ : b = a * d), e, (h₄ : c = b * e) =>
d * e, show c = a * (d * e) by simp[h₃,h₄, Nat.mul_assoc]
protected theorem dvd_mul_left_of_dvd {a b : Nat} (h : a b) (c : Nat) : a c * b :=
Nat.dvd_trans h (Nat.dvd_mul_left _ _)
protected theorem dvd_mul_right_of_dvd {a b : Nat} (h : a b) (c : Nat) : a b * c :=
Nat.dvd_trans h (Nat.dvd_mul_right _ _)
protected theorem eq_zero_of_zero_dvd {a : Nat} (h : 0 a) : a = 0 :=
let c, H' := h; H'.trans c.zero_mul
@@ -112,26 +106,8 @@ protected theorem dvd_of_mul_dvd_mul_left
protected theorem dvd_of_mul_dvd_mul_right (kpos : 0 < k) (H : m * k n * k) : m n := by
rw [Nat.mul_comm m k, Nat.mul_comm n k] at H; exact Nat.dvd_of_mul_dvd_mul_left kpos H
theorem dvd_sub {k m n : Nat} (h₁ : k m) (h₂ : k n) : k m - n :=
if H : n m then
(Nat.dvd_add_iff_left h₂).2 <| by rwa [Nat.sub_add_cancel H]
else
Nat.sub_eq_zero_of_le (Nat.le_of_not_le H) Nat.dvd_zero k
theorem dvd_sub_iff_right {m n k : Nat} (hkn : k n) (h : m n) : m n - k m k := by
refine ?_, dvd_sub h
let x, hx := h
cases hx
intro hy
let y, hy := hy
have hk : k = m * (x - y) := by
rw [Nat.sub_eq_iff_eq_add hkn] at hy
rw [Nat.mul_sub, hy, Nat.add_comm, Nat.add_sub_cancel]
exact hk Nat.dvd_mul_right _ _
theorem dvd_sub_iff_left {m n k : Nat} (hkn : k n) (h : m k) : m n - k m n := by
rw (occs := [2]) [ Nat.sub_add_cancel hkn]
exact Nat.dvd_add_iff_left h
theorem dvd_sub {k m n : Nat} (H : n m) (h₁ : k m) (h₂ : k n) : k m - n :=
(Nat.dvd_add_iff_left h₂).2 <| by rwa [Nat.sub_add_cancel H]
protected theorem mul_dvd_mul {a b c d : Nat} : a b c d a * c b * d
| e, he, f, hf =>

View File

@@ -1,7 +1,7 @@
/-
Copyright (c) 2021 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura, Mario Carneiro, Markus Himmel
Authors: Jeremy Avigad, Leonardo de Moura, Mario Carneiro
-/
prelude
import Init.Data.Nat.Dvd
@@ -106,11 +106,11 @@ theorem gcd_comm (m n : Nat) : gcd m n = gcd n m :=
(dvd_gcd (gcd_dvd_right n m) (gcd_dvd_left n m))
instance : Std.Commutative gcd := gcd_comm
theorem gcd_eq_left_iff_dvd : gcd m n = m m n :=
fun h => h gcd_dvd_right m n,
fun h => by rw [gcd_rec, mod_eq_zero_of_dvd h, gcd_zero_left]
theorem gcd_eq_left_iff_dvd : m n gcd m n = m :=
fun h => by rw [gcd_rec, mod_eq_zero_of_dvd h, gcd_zero_left],
fun h => h gcd_dvd_right m n
theorem gcd_eq_right_iff_dvd : gcd n m = m m n := by
theorem gcd_eq_right_iff_dvd : m n gcd n m = m := by
rw [gcd_comm]; exact gcd_eq_left_iff_dvd
theorem gcd_assoc (m n k : Nat) : gcd (gcd m n) k = gcd m (gcd n k) :=
@@ -174,19 +174,11 @@ theorem gcd_dvd_gcd_of_dvd_left {m k : Nat} (n : Nat) (H : m k) : gcd m n
theorem gcd_dvd_gcd_of_dvd_right {m k : Nat} (n : Nat) (H : m k) : gcd n m gcd n k :=
dvd_gcd (gcd_dvd_left n m) (Nat.dvd_trans (gcd_dvd_right n m) H)
theorem gcd_dvd_gcd_mul_left_left (m n k : Nat) : gcd m n gcd (k * m) n :=
theorem gcd_dvd_gcd_mul_left (m n k : Nat) : gcd m n gcd (k * m) n :=
gcd_dvd_gcd_of_dvd_left _ (Nat.dvd_mul_left _ _)
@[deprecated gcd_dvd_gcd_mul_left_left (since := "2025-04-01")]
theorem gcd_dvd_gcd_mul_left (m n k : Nat) : gcd m n gcd (k * m) n :=
gcd_dvd_gcd_mul_left_left m n k
theorem gcd_dvd_gcd_mul_right_left (m n k : Nat) : gcd m n gcd (m * k) n :=
gcd_dvd_gcd_of_dvd_left _ (Nat.dvd_mul_right _ _)
@[deprecated gcd_dvd_gcd_mul_right_left (since := "2025-04-01")]
theorem gcd_dvd_gcd_mul_right (m n k : Nat) : gcd m n gcd (m * k) n :=
gcd_dvd_gcd_mul_right_left m n k
gcd_dvd_gcd_of_dvd_left _ (Nat.dvd_mul_right _ _)
theorem gcd_dvd_gcd_mul_left_right (m n k : Nat) : gcd m n gcd m (k * n) :=
gcd_dvd_gcd_of_dvd_right _ (Nat.dvd_mul_left _ _)
@@ -200,16 +192,6 @@ theorem gcd_eq_left {m n : Nat} (H : m n) : gcd m n = m :=
theorem gcd_eq_right {m n : Nat} (H : n m) : gcd m n = n := by
rw [gcd_comm, gcd_eq_left H]
theorem gcd_right_eq_iff {m n n' : Nat} : gcd m n = gcd m n' k, k m (k n k n') := by
refine fun h k hkm => fun hkn => ?_, fun hkn' => ?_, fun h => Nat.dvd_antisymm ?_ ?_
· exact Nat.dvd_trans (h dvd_gcd hkm hkn) (Nat.gcd_dvd_right m n')
· exact Nat.dvd_trans (h dvd_gcd hkm hkn') (Nat.gcd_dvd_right m n)
· exact dvd_gcd_iff.2 gcd_dvd_left _ _, (h _ (gcd_dvd_left _ _)).1 (gcd_dvd_right _ _)
· exact dvd_gcd_iff.2 gcd_dvd_left _ _, (h _ (gcd_dvd_left _ _)).2 (gcd_dvd_right _ _)
theorem gcd_left_eq_iff {m m' n : Nat} : gcd m n = gcd m' n k, k n (k m k m') := by
rw [gcd_comm m n, gcd_comm m' n, gcd_right_eq_iff]
@[simp] theorem gcd_mul_left_left (m n : Nat) : gcd (m * n) n = n :=
Nat.dvd_antisymm (gcd_dvd_right _ _) (dvd_gcd (Nat.dvd_mul_left _ _) (Nat.dvd_refl _))
@@ -234,123 +216,10 @@ theorem gcd_left_eq_iff {m m' n : Nat} : gcd m n = gcd m' n ↔ ∀ k, k n
@[simp] theorem gcd_gcd_self_left_left (m n : Nat) : gcd (gcd m n) m = gcd m n := by
rw [gcd_comm m n, gcd_gcd_self_left_right]
@[simp] theorem gcd_add_mul_right_right (m n k : Nat) : gcd m (n + k * m) = gcd m n := by
theorem gcd_add_mul_self (m n k : Nat) : gcd m (n + k * m) = gcd m n := by
simp [gcd_rec m (n + k * m), gcd_rec m n]
@[deprecated gcd_add_mul_right_right (since := "2025-03-31")]
theorem gcd_add_mul_self (m n k : Nat) : gcd m (n + k * m) = gcd m n :=
gcd_add_mul_right_right _ _ _
@[simp] theorem gcd_add_mul_left_right (m n k : Nat) : gcd m (n + m * k) = gcd m n := by
rw [Nat.mul_comm, gcd_add_mul_right_right]
@[simp] theorem gcd_mul_right_add_right (m n k : Nat) : gcd m (k * m + n) = gcd m n := by
rw [Nat.add_comm, gcd_add_mul_right_right]
@[simp] theorem gcd_mul_left_add_right (m n k : Nat) : gcd m (m * k + n) = gcd m n := by
rw [Nat.add_comm, gcd_add_mul_left_right]
@[simp] theorem gcd_add_mul_right_left (m n k : Nat) : gcd (n + k * m) m = gcd n m := by
rw [gcd_comm, gcd_add_mul_right_right, gcd_comm]
@[simp] theorem gcd_add_mul_left_left (m n k : Nat) : gcd (n + m * k) m = gcd n m := by
rw [Nat.mul_comm, gcd_add_mul_right_left]
@[simp] theorem gcd_mul_right_add_left (m n k : Nat) : gcd (k * m + n) m = gcd n m := by
rw [Nat.add_comm, gcd_add_mul_right_left]
@[simp] theorem gcd_mul_left_add_left (m n k : Nat) : gcd (m * k + n) m = gcd n m := by
rw [Nat.add_comm, gcd_add_mul_left_left]
@[simp] theorem gcd_add_self_right (m n : Nat) : gcd m (n + m) = gcd m n := by
simpa using gcd_add_mul_right_right _ _ 1
@[simp] theorem gcd_self_add_right (m n : Nat) : gcd m (m + n) = gcd m n := by
simpa using gcd_mul_right_add_right _ _ 1
@[simp] theorem gcd_add_self_left (m n : Nat) : gcd (n + m) m = gcd n m := by
simpa using gcd_add_mul_right_left _ _ 1
@[simp] theorem gcd_self_add_left (m n : Nat) : gcd (m + n) m = gcd n m := by
simpa using gcd_mul_right_add_left _ _ 1
@[simp] theorem gcd_add_left_left_of_dvd {m k : Nat} (n : Nat) :
m k gcd (k + n) m = gcd n m := by
rintro l, rfl; exact gcd_mul_left_add_left m n l
@[simp] theorem gcd_add_right_left_of_dvd {m k : Nat} (n : Nat) :
m k gcd (n + k) m = gcd n m := by
rintro l, rfl; exact gcd_add_mul_left_left m n l
@[simp] theorem gcd_add_left_right_of_dvd {n k : Nat} (m : Nat) :
n k gcd n (k + m) = gcd n m := by
rintro l, rfl; exact gcd_mul_left_add_right n m l
@[simp] theorem gcd_add_right_right_of_dvd {n k : Nat} (m : Nat) :
n k gcd n (m + k) = gcd n m := by
rintro l, rfl; exact gcd_add_mul_left_right n m l
@[simp] theorem gcd_sub_mul_right_right {m n k : Nat} (h : k * m n) :
gcd m (n - k * m) = gcd m n := by
rw [ gcd_add_mul_right_right m (n - k * m) k, Nat.sub_add_cancel h]
@[simp] theorem gcd_sub_mul_left_right {m n k : Nat} (h : m * k n) :
gcd m (n - m * k) = gcd m n := by
rw [ gcd_add_mul_left_right m (n - m * k) k, Nat.sub_add_cancel h]
@[simp] theorem gcd_mul_right_sub_right {m n k : Nat} (h : n k * m) :
gcd m (k * m - n) = gcd m n :=
gcd_right_eq_iff.2 fun _ hl => dvd_sub_iff_right h (Nat.dvd_mul_left_of_dvd hl _)
@[simp] theorem gcd_mul_left_sub_right {m n k : Nat} (h : n m * k) :
gcd m (m * k - n) = gcd m n := by
rw [Nat.mul_comm, gcd_mul_right_sub_right (Nat.mul_comm _ _ h)]
@[simp] theorem gcd_sub_mul_right_left {m n k : Nat} (h : k * m n) :
gcd (n - k * m) m = gcd n m := by
rw [gcd_comm, gcd_sub_mul_right_right h, gcd_comm]
@[simp] theorem gcd_sub_mul_left_left {m n k : Nat} (h : m * k n) :
gcd (n - m * k) m = gcd n m := by
rw [Nat.mul_comm, gcd_sub_mul_right_left (Nat.mul_comm _ _ h)]
@[simp] theorem gcd_mul_right_sub_left {m n k : Nat} (h : n k * m) :
gcd (k * m - n) m = gcd n m := by
rw [gcd_comm, gcd_mul_right_sub_right h, gcd_comm]
@[simp] theorem gcd_mul_left_sub_left {m n k : Nat} (h : n m * k) :
gcd (m * k - n) m = gcd n m := by
rw [Nat.mul_comm, gcd_mul_right_sub_left (Nat.mul_comm _ _ h)]
@[simp] theorem gcd_sub_self_right {m n : Nat} (h : m n) : gcd m (n - m) = gcd m n := by
simpa using gcd_sub_mul_right_right (k := 1) (by simpa using h)
@[simp] theorem gcd_self_sub_right {m n : Nat} (h : n m) : gcd m (m - n) = gcd m n := by
simpa using gcd_mul_right_sub_right (k := 1) (by simpa using h)
@[simp] theorem gcd_sub_self_left {m n : Nat} (h : m n) : gcd (n - m) m = gcd n m := by
simpa using gcd_sub_mul_right_left (k := 1) (by simpa using h)
@[simp] theorem gcd_self_sub_left {m n : Nat} (h : n m) : gcd (m - n) m = gcd n m := by
simpa using gcd_mul_right_sub_left (k := 1) (by simpa using h)
@[simp] theorem gcd_sub_left_left_of_dvd {n k : Nat} (m : Nat) (h : n k) :
m k gcd (k - n) m = gcd n m := by
rintro l, rfl; exact gcd_mul_left_sub_left h
@[simp] theorem gcd_sub_right_left_of_dvd {n k : Nat} (m : Nat) (h : k n) :
m k gcd (n - k) m = gcd n m := by
rintro l, rfl; exact gcd_sub_mul_left_left h
@[simp] theorem gcd_sub_left_right_of_dvd {m k : Nat} (n : Nat) (h : m k) :
n k gcd n (k - m) = gcd n m := by
rintro l, rfl; exact gcd_mul_left_sub_right h
@[simp] theorem gcd_sub_right_right_of_dvd {m k : Nat} (n : Nat) (h : k m) :
n k gcd n (m - k) = gcd n m := by
rintro l, rfl; exact gcd_sub_mul_left_right h
@[simp] theorem gcd_eq_zero_iff {i j : Nat} : gcd i j = 0 i = 0 j = 0 :=
theorem gcd_eq_zero_iff {i j : Nat} : gcd i j = 0 i = 0 j = 0 :=
fun h => eq_zero_of_gcd_eq_zero_left h, eq_zero_of_gcd_eq_zero_right h,
fun h => by simp [h]
@@ -368,7 +237,7 @@ theorem gcd_eq_iff {a b : Nat} :
· exact Nat.dvd_gcd ha hb
/-- Represent a divisor of `m * n` as a product of a divisor of `m` and a divisor of `n`. -/
def dvdProdDvdOfDvdProd {k m n : Nat} (h : k m * n) :
def prod_dvd_and_dvd_of_dvd_prod {k m n : Nat} (H : k m * n) :
{d : {m' // m' m} × {n' // n' n} // k = d.1.val * d.2.val} :=
if h0 : gcd k m = 0 then
0, eq_zero_of_gcd_eq_zero_right h0 Nat.dvd_refl 0,
@@ -379,97 +248,15 @@ def dvdProdDvdOfDvdProd {k m n : Nat} (h : k m * n) :
refine gcd k m, gcd_dvd_right k m, k / gcd k m, ?_, hd.symm
apply Nat.dvd_of_mul_dvd_mul_left (Nat.pos_of_ne_zero h0)
rw [hd, gcd_mul_right]
exact Nat.dvd_gcd (Nat.dvd_mul_right _ _) h
exact Nat.dvd_gcd (Nat.dvd_mul_right _ _) H
@[inherit_doc dvdProdDvdOfDvdProd, deprecated dvdProdDvdOfDvdProd (since := "2025-04-01")]
def prod_dvd_and_dvd_of_dvd_prod {k m n : Nat} (H : k m * n) :
{d : {m' // m' m} × {n' // n' n} // k = d.1.val * d.2.val} :=
dvdProdDvdOfDvdProd H
protected theorem dvd_mul {k m n : Nat} : k m * n k₁ k₂, k₁ m k₂ n k₁ * k₂ = k := by
refine fun h => ?_, ?_
· obtain k₁, hk₁, k₂, hk₂, rfl := dvdProdDvdOfDvdProd h
exact k₁, k₂, hk₁, hk₂, rfl
· rintro k₁, k₂, hk₁, hk₂, rfl
exact Nat.mul_dvd_mul hk₁ hk₂
theorem gcd_mul_right_dvd_mul_gcd (k m n : Nat) : gcd k (m * n) gcd k m * gcd k n := by
theorem gcd_mul_dvd_mul_gcd (k m n : Nat) : gcd k (m * n) gcd k m * gcd k n := by
let m', hm', n', hn', (h : gcd k (m * n) = m' * n') :=
dvdProdDvdOfDvdProd <| gcd_dvd_right k (m * n)
prod_dvd_and_dvd_of_dvd_prod <| gcd_dvd_right k (m * n)
rw [h]
have h' : m' * n' k := h gcd_dvd_left ..
exact Nat.mul_dvd_mul
(dvd_gcd (Nat.dvd_trans (Nat.dvd_mul_right m' n') h') hm')
(dvd_gcd (Nat.dvd_trans (Nat.dvd_mul_left n' m') h') hn')
@[deprecated gcd_mul_right_dvd_mul_gcd (since := "2025-04-02")]
theorem gcd_mul_dvd_mul_gcd (k m n : Nat) : gcd k (m * n) gcd k m * gcd k n :=
gcd_mul_right_dvd_mul_gcd k m n
theorem gcd_mul_left_dvd_mul_gcd (k m n : Nat) : gcd (m * n) k gcd m k * gcd n k := by
simpa [gcd_comm, Nat.mul_comm] using gcd_mul_right_dvd_mul_gcd _ _ _
theorem dvd_gcd_mul_iff_dvd_mul {k n m : Nat} : k gcd k n * m k n * m := by
refine (Nat.dvd_trans · <| Nat.mul_dvd_mul_right (k.gcd_dvd_right n) m), fun y, hy ?_
rw [ gcd_mul_right, hy, gcd_mul_left]
exact Nat.dvd_mul_right k (gcd m y)
theorem dvd_mul_gcd_iff_dvd_mul {k n m : Nat} : k n * gcd k m k n * m := by
rw [Nat.mul_comm, dvd_gcd_mul_iff_dvd_mul, Nat.mul_comm]
theorem dvd_gcd_mul_gcd_iff_dvd_mul {k n m : Nat} : k gcd k n * gcd k m k n * m := by
rw [dvd_gcd_mul_iff_dvd_mul, dvd_mul_gcd_iff_dvd_mul]
theorem gcd_eq_one_iff {m n : Nat} : gcd m n = 1 c, c m c n c = 1 := by
simp [gcd_eq_iff]
theorem gcd_mul_right_right_of_gcd_eq_one {n m k : Nat} : gcd n m = 1 gcd n (m * k) = gcd n k := by
rw [gcd_right_eq_iff, gcd_eq_one_iff]
refine fun h l hl₁ => ?_, fun a => Nat.dvd_mul_left_of_dvd a m
rw [Nat.dvd_mul]
rintro k₁, k₂, hk₁, hk₂, rfl
obtain rfl : k₁ = 1 := h _ (Nat.dvd_trans (Nat.dvd_mul_right k₁ k₂) hl₁) hk₁
simpa
theorem gcd_mul_left_right_of_gcd_eq_one {n m k : Nat} (h : gcd n m = 1) :
gcd n (k * m) = gcd n k := by
rw [Nat.mul_comm, gcd_mul_right_right_of_gcd_eq_one h]
theorem gcd_mul_right_left_of_gcd_eq_one {n m k : Nat} (h : gcd n m = 1) :
gcd (n * k) m = gcd k m := by
rw [gcd_comm, gcd_mul_right_right_of_gcd_eq_one (gcd_comm _ _ h), gcd_comm]
theorem gcd_mul_left_left_of_gcd_eq_one {n m k : Nat} (h : gcd n m = 1) :
gcd (k * n) m = gcd k m := by
rw [Nat.mul_comm, gcd_mul_right_left_of_gcd_eq_one h]
theorem gcd_pow_left_of_gcd_eq_one {k n m : Nat} (h : gcd n m = 1) : gcd (n ^ k) m = 1 := by
induction k with
| zero => simp [Nat.pow_zero]
| succ k ih => rw [Nat.pow_succ, gcd_mul_left_left_of_gcd_eq_one h, ih]
theorem gcd_pow_right_of_gcd_eq_one {k n m : Nat} (h : gcd n m = 1) : gcd n (m ^ k) = 1 := by
rw [gcd_comm, gcd_pow_left_of_gcd_eq_one (gcd_comm _ _ h)]
theorem pow_gcd_pow_of_gcd_eq_one {k l n m : Nat} (h : gcd n m = 1) : gcd (n ^ k) (m ^ l) = 1 :=
gcd_pow_left_of_gcd_eq_one (gcd_pow_right_of_gcd_eq_one h)
theorem gcd_div_gcd_div_gcd_of_pos_left {n m : Nat} (h : 0 < n) :
gcd (n / gcd n m) (m / gcd n m) = 1 := by
rw [gcd_div (gcd_dvd_left _ _) (gcd_dvd_right _ _), Nat.div_self (gcd_pos_of_pos_left _ h)]
theorem gcd_div_gcd_div_gcd_of_pos_right {n m : Nat} (h : 0 < m) :
gcd (n / gcd n m) (m / gcd n m) = 1 := by
rw [gcd_div (gcd_dvd_left _ _) (gcd_dvd_right _ _), Nat.div_self (gcd_pos_of_pos_right _ h)]
theorem pow_gcd_pow {k n m : Nat} : gcd (n ^ k) (m ^ k) = (gcd n m) ^ k := by
refine (Nat.eq_zero_or_pos n).elim (by rintro rfl; cases k <;> simp [Nat.pow_zero]) (fun hn => ?_)
conv => lhs; rw [ Nat.div_mul_cancel (gcd_dvd_left n m)]
conv => lhs; arg 2; rw [ Nat.div_mul_cancel (gcd_dvd_right n m)]
rw [Nat.mul_pow, Nat.mul_pow, gcd_mul_right, pow_gcd_pow_of_gcd_eq_one, Nat.one_mul]
exact gcd_div_gcd_div_gcd_of_pos_left hn
theorem pow_dvd_pow_iff {a b n : Nat} (h : n 0) : a ^ n b ^ n a b := by
rw [ gcd_eq_left_iff_dvd, gcd_eq_left_iff_dvd, pow_gcd_pow, Nat.pow_left_inj h]
end Nat

View File

@@ -1,7 +1,7 @@
/-
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Leonardo de Moura, Mario Carneiro, Markus Himmel
Authors: Jeremy Avigad, Leonardo de Moura, Mario Carneiro
-/
prelude
import Init.Data.Nat.Gcd
@@ -10,6 +10,9 @@ import Init.Data.Nat.Lemmas
/-!
# Lemmas about `Nat.lcm`
## Future work:
Most of the material about `Nat.gcd` from `Init.Data.Nat.Gcd` has analogues for `Nat.lcm`
that should be added to this file.
-/
namespace Nat
@@ -26,36 +29,17 @@ Examples:
-/
def lcm (m n : Nat) : Nat := m * n / gcd m n
theorem lcm_eq_mul_div (m n : Nat) : lcm m n = m * n / gcd m n := rfl
@[simp] theorem gcd_mul_lcm (m n : Nat) : gcd m n * lcm m n = m * n := by
rw [lcm_eq_mul_div,
Nat.mul_div_cancel' (Nat.dvd_trans (gcd_dvd_left m n) (Nat.dvd_mul_right m n))]
@[simp] theorem lcm_mul_gcd (m n : Nat) : lcm m n * gcd m n = m * n := by
simp [Nat.mul_comm]
@[simp] theorem lcm_dvd_mul (m n : Nat) : lcm m n m * n := gcd m n, by simp
@[simp] theorem gcd_dvd_mul (m n : Nat) : gcd m n m * n := lcm m n, by simp
@[simp] theorem lcm_le_mul {m n : Nat} (hm : 0 < m) (hn : 0 < n) : lcm m n m * n :=
le_of_dvd (Nat.mul_pos hm hn) (lcm_dvd_mul _ _)
@[simp] theorem gcd_le_mul {m n : Nat} (hm : 0 < m) (hn : 0 < n) : gcd m n m * n :=
le_of_dvd (Nat.mul_pos hm hn) (gcd_dvd_mul _ _)
theorem lcm_comm (m n : Nat) : lcm m n = lcm n m := by
rw [lcm_eq_mul_div, lcm_eq_mul_div, Nat.mul_comm n m, gcd_comm n m]
rw [lcm, lcm, Nat.mul_comm n m, gcd_comm n m]
instance : Std.Commutative lcm := lcm_comm
@[simp] theorem lcm_zero_left (m : Nat) : lcm 0 m = 0 := by simp [lcm_eq_mul_div]
@[simp] theorem lcm_zero_left (m : Nat) : lcm 0 m = 0 := by simp [lcm]
@[simp] theorem lcm_zero_right (m : Nat) : lcm m 0 = 0 := by simp [lcm_eq_mul_div]
@[simp] theorem lcm_zero_right (m : Nat) : lcm m 0 = 0 := by simp [lcm]
@[simp] theorem lcm_one_left (m : Nat) : lcm 1 m = m := by simp [lcm_eq_mul_div]
@[simp] theorem lcm_one_left (m : Nat) : lcm 1 m = m := by simp [lcm]
@[simp] theorem lcm_one_right (m : Nat) : lcm m 1 = m := by simp [lcm_eq_mul_div]
@[simp] theorem lcm_one_right (m : Nat) : lcm m 1 = m := by simp [lcm]
instance : Std.LawfulIdentity lcm 1 where
left_id := lcm_one_left
right_id := lcm_one_right
@@ -63,32 +47,16 @@ instance : Std.LawfulIdentity lcm 1 where
@[simp] theorem lcm_self (m : Nat) : lcm m m = m := by
match eq_zero_or_pos m with
| .inl h => rw [h, lcm_zero_left]
| .inr h => simp [lcm_eq_mul_div, Nat.mul_div_cancel _ h]
| .inr h => simp [lcm, Nat.mul_div_cancel _ h]
instance : Std.IdempotentOp lcm := lcm_self
theorem dvd_lcm_left (m n : Nat) : m lcm m n :=
n / gcd m n, by rw [ Nat.mul_div_assoc m (Nat.gcd_dvd_right m n), lcm_eq_mul_div]
n / gcd m n, by rw [ Nat.mul_div_assoc m (Nat.gcd_dvd_right m n)]; rfl
theorem dvd_lcm_right (m n : Nat) : n lcm m n := lcm_comm n m dvd_lcm_left n m
theorem lcm_ne_zero (hm : m 0) (hn : n 0) : lcm m n 0 := by
intro h
have h1 := gcd_mul_lcm m n
rw [h, Nat.mul_zero] at h1
match mul_eq_zero.1 h1.symm with
| .inl hm1 => exact hm hm1
| .inr hn1 => exact hn hn1
theorem lcm_pos : 0 < m 0 < n 0 < lcm m n := by
simpa [ Nat.pos_iff_ne_zero] using lcm_ne_zero
theorem le_lcm_left (m : Nat) (hn : 0 < n) : m lcm m n :=
(Nat.eq_zero_or_pos m).elim (by rintro rfl; simp)
(fun hm => le_of_dvd (lcm_pos hm hn) (dvd_lcm_left m n))
theorem le_lcm_right (hm : 0 < m) (n : Nat) : n lcm m n :=
(Nat.eq_zero_or_pos n).elim (by rintro rfl; simp)
(fun hn => le_of_dvd (lcm_pos hm hn) (dvd_lcm_right m n))
theorem gcd_mul_lcm (m n : Nat) : gcd m n * lcm m n = m * n := by
rw [lcm, Nat.mul_div_cancel' (Nat.dvd_trans (gcd_dvd_left m n) (Nat.dvd_mul_right m n))]
theorem lcm_dvd {m n k : Nat} (H1 : m k) (H2 : n k) : lcm m n k := by
match eq_zero_or_pos k with
@@ -98,18 +66,6 @@ theorem lcm_dvd {m n k : Nat} (H1 : m k) (H2 : n k) : lcm m n k := b
rw [gcd_mul_lcm, gcd_mul_right, Nat.mul_comm n k]
exact dvd_gcd (Nat.mul_dvd_mul_left _ H2) (Nat.mul_dvd_mul_right H1 _)
theorem lcm_dvd_iff {m n k : Nat} : lcm m n k m k n k :=
fun h => Nat.dvd_trans (dvd_lcm_left _ _) h, Nat.dvd_trans (dvd_lcm_right _ _) h,
fun hm, hn => lcm_dvd hm hn
theorem lcm_eq_left_iff_dvd : lcm m n = m n m := by
refine (Nat.eq_zero_or_pos m).elim (by rintro rfl; simp) (fun hm => ?_)
rw [lcm_eq_mul_div, Nat.div_eq_iff_eq_mul_left (gcd_pos_of_pos_left _ hm) (gcd_dvd_mul _ _),
Nat.mul_left_cancel_iff hm, Eq.comm, gcd_eq_right_iff_dvd]
theorem lcm_eq_right_iff_dvd : lcm m n = n m n := by
rw [lcm_comm, lcm_eq_left_iff_dvd]
theorem lcm_assoc (m n k : Nat) : lcm (lcm m n) k = lcm m (lcm n k) :=
Nat.dvd_antisymm
(lcm_dvd
@@ -122,126 +78,12 @@ Nat.dvd_antisymm
(dvd_lcm_right (lcm m n) k)))
instance : Std.Associative lcm := lcm_assoc
theorem lcm_mul_left (m n k : Nat) : lcm (m * n) (m * k) = m * lcm n k := by
refine (Nat.eq_zero_or_pos m).elim (by rintro rfl; simp) (fun hm => ?_)
rw [lcm_eq_mul_div, gcd_mul_left,
Nat.mul_div_assoc _ (Nat.mul_dvd_mul_left _ (gcd_dvd_right _ _)), Nat.mul_div_mul_left _ _ hm,
lcm_eq_mul_div, Nat.mul_div_assoc _ (gcd_dvd_right _ _), Nat.mul_assoc]
theorem lcm_mul_right (m n k : Nat) : lcm (m * n) (k * n) = lcm m k * n := by
rw [Nat.mul_comm _ n, Nat.mul_comm _ n, Nat.mul_comm _ n, lcm_mul_left]
theorem eq_zero_of_lcm_eq_zero (h : lcm m n = 0) : m = 0 n = 0 := by
cases m <;> cases n <;> simp [lcm_ne_zero] at *
@[simp] theorem lcm_eq_zero_iff : lcm m n = 0 m = 0 n = 0 := by
cases m <;> cases n <;> simp [lcm_ne_zero] at *
theorem lcm_eq_iff {n m l : Nat} :
lcm n m = l n l m l ( c, n c m c l c) := by
refine ?_, fun hn, hm, hl => Nat.dvd_antisymm (lcm_dvd hn hm) ?_
· rintro rfl
exact dvd_lcm_left _ _, dvd_lcm_right _ _, fun _ => Nat.lcm_dvd
· exact hl _ (dvd_lcm_left _ _) (dvd_lcm_right _ _)
theorem lcm_div {m n k : Nat} (hkm : k m) (hkn : k n) : lcm (m / k) (n / k) = lcm m n / k := by
refine (Nat.eq_zero_or_pos k).elim (by rintro rfl; simp) (fun hk => lcm_eq_iff.2
Nat.div_dvd_div hkm (dvd_lcm_left m n), Nat.div_dvd_div hkn (dvd_lcm_right m n),
fun c hc₁ hc₂ => ?_)
rw [div_dvd_iff_dvd_mul _ hk] at hc₁ hc₂
· exact lcm_dvd hc₁ hc₂
· exact Nat.dvd_trans hkm (dvd_lcm_left _ _)
· exact hkn
· exact hkm
theorem lcm_dvd_lcm_of_dvd_left {m k : Nat} (n : Nat) (h : m k) : lcm m n lcm k n :=
lcm_dvd (Nat.dvd_trans h (dvd_lcm_left _ _)) (dvd_lcm_right _ _)
theorem lcm_dvd_lcm_of_dvd_right {m k : Nat} (n : Nat) (h : m k) : lcm n m lcm n k :=
lcm_dvd (dvd_lcm_left _ _) (Nat.dvd_trans h (dvd_lcm_right _ _))
theorem lcm_dvd_lcm_mul_left_left (m n k : Nat) : lcm m n lcm (k * m) n :=
lcm_dvd_lcm_of_dvd_left _ (Nat.dvd_mul_left _ _)
theorem lcm_dvd_lcm_mul_right_left (m n k : Nat) : lcm m n lcm (m * k) n :=
lcm_dvd_lcm_of_dvd_left _ (Nat.dvd_mul_right _ _)
theorem lcm_dvd_lcm_mul_left_right (m n k : Nat) : lcm m n lcm m (k * n) :=
lcm_dvd_lcm_of_dvd_right _ (Nat.dvd_mul_left _ _)
theorem lcm_dvd_lcm_mul_right_right (m n k : Nat) : lcm m n lcm m (n * k) :=
lcm_dvd_lcm_of_dvd_right _ (Nat.dvd_mul_right _ _)
theorem lcm_eq_left {m n : Nat} (h : n m) : lcm m n = m :=
lcm_eq_left_iff_dvd.2 h
theorem lcm_eq_right {m n : Nat} (h : m n) : lcm m n = n :=
lcm_eq_right_iff_dvd.2 h
@[simp] theorem lcm_mul_left_left (m n : Nat) : lcm (m * n) n = m * n := by
simpa [lcm_eq_iff, Nat.dvd_mul_left] using fun _ h _ => h
@[simp] theorem lcm_mul_left_right (m n : Nat) : lcm n (m * n) = m * n := by
simp [lcm_eq_iff, Nat.dvd_mul_left]
@[simp] theorem lcm_mul_right_left (m n : Nat) : lcm (n * m) n = n * m := by
simpa [lcm_eq_iff, Nat.dvd_mul_right] using fun _ h _ => h
@[simp] theorem lcm_mul_right_right (m n : Nat) : lcm n (n * m) = n * m := by
simp [lcm_eq_iff, Nat.dvd_mul_right]
@[simp] theorem lcm_lcm_self_right_left (m n : Nat) : lcm m (lcm m n) = lcm m n := by
simp [ lcm_assoc]
@[simp] theorem lcm_lcm_self_right_right (m n : Nat) : lcm m (lcm n m) = lcm m n := by
rw [lcm_comm n m, lcm_lcm_self_right_left]
@[simp] theorem lcm_lcm_self_left_left (m n : Nat) : lcm (lcm m n) m = lcm n m := by
simp [lcm_comm]
@[simp] theorem lcm_lcm_self_left_right (m n : Nat) : lcm (lcm n m) m = lcm n m := by
simp [lcm_comm]
theorem lcm_eq_mul_iff {m n : Nat} : lcm m n = m * n m = 0 n = 0 gcd m n = 1 := by
rw [lcm_eq_mul_div, Nat.div_eq_self, Nat.mul_eq_zero, or_assoc]
@[simp] theorem lcm_eq_one_iff {m n : Nat} : lcm m n = 1 m = 1 n = 1 := by
refine fun h => ?_, ?_, by rintro rfl, rfl; simp <;>
(apply Nat.eq_one_of_dvd_one; simp [ h, dvd_lcm_left, dvd_lcm_right])
theorem lcm_mul_right_dvd_mul_lcm (k m n : Nat) : lcm k (m * n) lcm k m * lcm k n :=
lcm_dvd (Nat.dvd_mul_left_of_dvd (dvd_lcm_left _ _) _)
(Nat.mul_dvd_mul (dvd_lcm_right _ _) (dvd_lcm_right _ _))
theorem lcm_mul_left_dvd_mul_lcm (k m n : Nat) : lcm (m * n) k lcm m k * lcm n k := by
simpa [lcm_comm, Nat.mul_comm] using lcm_mul_right_dvd_mul_lcm _ _ _
theorem lcm_dvd_mul_self_left_iff_dvd_mul {k n m : Nat} : lcm k n k * m n k * m :=
fun h => Nat.dvd_trans (dvd_lcm_right _ _) h, fun h => lcm_dvd (Nat.dvd_mul_right k m) h
theorem lcm_dvd_mul_self_right_iff_dvd_mul {k m n : Nat} : lcm n k m * k n m * k := by
rw [lcm_comm, Nat.mul_comm m, lcm_dvd_mul_self_left_iff_dvd_mul]
theorem lcm_mul_right_right_eq_mul_of_lcm_eq_mul {n m k : Nat} (h : lcm n m = n * m) :
lcm n (m * k) = lcm n k * m := by
rcases lcm_eq_mul_iff.1 h with (rfl|rfl|h) <;> try (simp; done)
rw [Nat.mul_comm _ m, lcm_eq_mul_div, gcd_mul_right_right_of_gcd_eq_one h, Nat.mul_comm,
Nat.mul_assoc, Nat.mul_comm k, Nat.mul_div_assoc _ (gcd_dvd_mul _ _), lcm_eq_mul_div]
theorem lcm_mul_left_right_eq_mul_of_lcm_eq_mul {n m k} (h : lcm n m = n * m) :
lcm n (k * m) = lcm n k * m := by
rw [Nat.mul_comm, lcm_mul_right_right_eq_mul_of_lcm_eq_mul h, Nat.mul_comm]
theorem lcm_mul_right_left_eq_mul_of_lcm_eq_mul {n m k} (h : lcm n m = n * m) :
lcm (n * k) m = n * lcm k m := by
rw [lcm_comm, lcm_mul_right_right_eq_mul_of_lcm_eq_mul, lcm_comm, Nat.mul_comm]
rwa [lcm_comm, Nat.mul_comm]
theorem lcm_mul_left_left_eq_mul_of_lcm_eq_mul {n m k} (h : lcm n m = n * m) :
lcm (k * n) m = n * lcm k m := by
rw [Nat.mul_comm, lcm_mul_right_left_eq_mul_of_lcm_eq_mul h]
theorem pow_lcm_pow {k n m : Nat} : lcm (n ^ k) (m ^ k) = (lcm n m) ^ k := by
rw [lcm_eq_mul_div, pow_gcd_pow, Nat.mul_pow, Nat.div_pow (gcd_dvd_mul _ _), lcm_eq_mul_div]
theorem lcm_ne_zero (hm : m 0) (hn : n 0) : lcm m n 0 := by
intro h
have h1 := gcd_mul_lcm m n
rw [h, Nat.mul_zero] at h1
match mul_eq_zero.1 h1.symm with
| .inl hm1 => exact hm hm1
| .inr hn1 => exact hn hn1
end Nat

View File

@@ -532,12 +532,6 @@ protected theorem pos_of_lt_mul_right {a b c : Nat} (h : a < b * c) : 0 < b := b
replace h : 0 < b * c := by omega
exact Nat.pos_of_mul_pos_right h
protected theorem mul_dvd_mul_iff_left {a b c : Nat} (h : 0 < a) : a * b a * c b c :=
fun k, hk => k, Nat.mul_left_cancel h (Nat.mul_assoc _ _ _ hk), Nat.mul_dvd_mul_left _
protected theorem mul_dvd_mul_iff_right {a b c : Nat} (h : 0 < c) : a * c b * c a b := by
rw [Nat.mul_comm _ c, Nat.mul_comm _ c, Nat.mul_dvd_mul_iff_left h]
/-! ### div/mod -/
theorem mod_two_eq_zero_or_one (n : Nat) : n % 2 = 0 n % 2 = 1 :=
@@ -608,27 +602,6 @@ theorem mod_eq_sub (x w : Nat) : x % w = x - w * (x / w) := by
conv => rhs; congr; rw [ mod_add_div x w]
simp
theorem div_dvd_div {m n k : Nat} : k m m n m / k n / k := by
refine (Nat.eq_zero_or_pos k).elim (by rintro rfl; simp) (fun hk => ?_)
rintro a, rfl b, rfl
rw [Nat.mul_comm, Nat.mul_div_cancel _ hk, Nat.mul_comm, Nat.mul_assoc, Nat.mul_div_cancel _ hk]
exact Nat.dvd_mul_left a b
@[simp] theorem div_dvd_iff_dvd_mul {a b c : Nat} (h : b a) (hb : 0 < b) :
a / b c a b * c := by
rcases h with k, rfl
rw [Nat.mul_comm, Nat.mul_div_cancel _ hb, Nat.mul_comm, Nat.mul_dvd_mul_iff_left hb]
theorem div_eq_self {m n : Nat} : m / n = m m = 0 n = 1 := by
refine fun h => (Nat.eq_zero_or_pos m).elim Or.inl ?_, fun h => by cases h <;> simp_all
refine fun hm => Or.inr ?_
rcases Nat.lt_trichotomy n 1 with (hn|hn|hn)
· obtain rfl : n = 0 := by rwa [lt_one_iff] at hn
obtain rfl : 0 = m := by simpa [Nat.div_zero] using h
simp at hm
· exact hn
· exact False.elim (absurd h (Nat.ne_of_lt (Nat.div_lt_self hm hn)))
/-! ### pow -/
theorem pow_succ' {m n : Nat} : m ^ n.succ = m * m ^ n := by
@@ -653,6 +626,9 @@ protected theorem zero_pow {n : Nat} (H : 0 < n) : 0 ^ n = 0 := by
| zero => rfl
| succ _ ih => rw [Nat.pow_succ, Nat.mul_one, ih]
@[simp] protected theorem pow_one (a : Nat) : a ^ 1 = a := by
rw [Nat.pow_succ, Nat.pow_zero, Nat.one_mul]
protected theorem pow_two (a : Nat) : a ^ 2 = a * a := by rw [Nat.pow_succ, Nat.pow_one]
protected theorem pow_add (a m n : Nat) : a ^ (m + n) = a ^ m * a ^ n := by
@@ -674,6 +650,11 @@ protected theorem pow_mul' (a m n : Nat) : a ^ (m * n) = (a ^ n) ^ m := by
protected theorem pow_right_comm (a m n : Nat) : (a ^ m) ^ n = (a ^ n) ^ m := by
rw [ Nat.pow_mul, Nat.pow_mul']
protected theorem mul_pow (a b n : Nat) : (a * b) ^ n = a ^ n * b ^ n := by
induction n with
| zero => rw [Nat.pow_zero, Nat.pow_zero, Nat.pow_zero, Nat.mul_one]
| succ _ ih => rw [Nat.pow_succ, Nat.pow_succ, Nat.pow_succ, Nat.mul_mul_mul_comm, ih]
protected theorem one_lt_two_pow (h : n 0) : 1 < 2 ^ n :=
match n, h with
| n+1, _ => by
@@ -891,18 +872,6 @@ theorem dvd_of_pow_dvd {p k m : Nat} (hk : 1 ≤ k) (hpk : p ^ k m) : p
protected theorem pow_div {x m n : Nat} (h : n m) (hx : 0 < x) : x ^ m / x ^ n = x ^ (m - n) := by
rw [Nat.div_eq_iff_eq_mul_left (Nat.pow_pos hx) (Nat.pow_dvd_pow _ h), Nat.pow_sub_mul_pow _ h]
protected theorem div_pow {a b c : Nat} (h : a b) : (b / a) ^ c = b ^ c / a ^ c := by
refine (Nat.eq_zero_or_pos c).elim (by rintro rfl; simp) (fun hc => ?_)
refine (Nat.eq_zero_or_pos a).elim (by rintro rfl; simp [hc]) (fun ha => ?_)
rw [eq_comm, Nat.div_eq_iff_eq_mul_left (Nat.pow_pos ha)
((Nat.pow_dvd_pow_iff (Nat.pos_iff_ne_zero.1 hc)).2 h)]
clear hc
induction c with
| zero => simp
| succ c ih =>
rw [Nat.pow_succ (b / a), Nat.pow_succ a, Nat.mul_comm _ a, Nat.mul_assoc, Nat.mul_assoc _ a,
Nat.div_mul_cancel h, Nat.mul_comm b, Nat.mul_assoc, ih, Nat.pow_succ]
/-! ### shiftLeft and shiftRight -/
@[simp] theorem shiftLeft_zero : n <<< 0 = n := rfl

View File

@@ -144,13 +144,6 @@ none
| none => b
| some a => f a rfl
/-- Partial filter. If `o : Option α`, `p : (a : α) → o = some a → Bool`, then `o.pfilter p` is
the same as `o.filter p` but `p` is passed the proof that `o = some a`. -/
@[inline] def pfilter (o : Option α) (p : (a : α) o = some a Bool) : Option α :=
match o with
| none => none
| some a => bif p a rfl then o else none
/--
Executes a monadic action on an optional value if it is present, or does nothing if there is no
value.

View File

@@ -59,15 +59,6 @@ theorem get!_eq_getD [Inhabited α] (o : Option α) : o.get! = o.getD default :=
@[deprecated get!_eq_getD (since := "2024-11-18")] abbrev get!_eq_getD_default := @get!_eq_getD
theorem get_congr {o o' : Option α} {ho : o.isSome} (h : o = o') :
o.get ho = o'.get (h ho) := by
cases h; rfl
theorem get_inj {o1 o2 : Option α} {h1} {h2} :
o1.get h1 = o2.get h2 o1 = o2 := by
match o1, o2, h1, h2 with
| some a, some b, _, _ => simp only [Option.get_some, Option.some.injEq]
theorem mem_unique {o : Option α} {a b : α} (ha : a o) (hb : b o) : a = b :=
some.inj <| ha hb
@@ -84,12 +75,6 @@ theorem isSome_iff_exists : isSome x ↔ ∃ a, x = some a := by cases x <;> sim
theorem isSome_eq_isSome : (isSome x = isSome y) (x = none y = none) := by
cases x <;> cases y <;> simp
theorem isSome_of_mem {x : Option α} {y : α} (h : y x) : x.isSome := by
cases x <;> trivial
theorem isSome_of_eq_some {x : Option α} {y : α} (h : x = some y) : x.isSome := by
cases x <;> trivial
@[simp] theorem not_isSome : isSome a = false a.isNone = true := by
cases a <;> simp
@@ -157,23 +142,6 @@ theorem bind_congr {α β} {o : Option α} {f g : α → Option β} :
(h : a, o = some a f a = g a) o.bind f = o.bind g := by
cases o <;> simp
theorem isSome_bind {α β : Type _} (x : Option α) (f : α Option β) :
(x.bind f).isSome = x.any (fun x => (f x).isSome) := by
cases x <;> rfl
theorem isSome_of_isSome_bind {α β : Type _} {x : Option α} {f : α Option β}
(h : (x.bind f).isSome) : x.isSome := by
cases x <;> trivial
theorem isSome_apply_of_isSome_bind {α β : Type _} {x : Option α} {f : α Option β}
(h : (x.bind f).isSome) : (f (x.get (isSome_of_isSome_bind h))).isSome := by
cases x <;> trivial
@[simp] theorem get_bind {α β : Type _} {x : Option α} {f : α Option β} (h : (x.bind f).isSome) :
(x.bind f).get h = (f (x.get (isSome_of_isSome_bind h))).get
(isSome_apply_of_isSome_bind h) := by
cases x <;> trivial
theorem join_eq_some : x.join = some a x = some (some a) := by
simp [bind_eq_some]
@@ -253,11 +221,11 @@ theorem map_inj_right {f : α → β} {o o' : Option α} (w : ∀ x y, f x = f y
| none => simp
| some a' => simpa using fun h => w _ _ h, fun h => congrArg f h
@[simp] theorem map_if {f : α β} {_ : Decidable c} :
@[simp] theorem map_if {f : α β} [Decidable c] :
(if c then some a else none).map f = if c then some (f a) else none := by
split <;> rfl
@[simp] theorem map_dif {f : α β} {_ : Decidable c} {a : c α} :
@[simp] theorem map_dif {f : α β} [Decidable c] {a : c α} :
(if h : c then some (a h) else none).map f = if h : c then some (f (a h)) else none := by
split <;> rfl
@@ -272,8 +240,8 @@ theorem isSome_of_isSome_filter (p : α → Bool) (o : Option α) (h : (o.filter
@[deprecated isSome_of_isSome_filter (since := "2025-03-18")]
abbrev isSome_filter_of_isSome := @isSome_of_isSome_filter
@[simp] theorem filter_eq_none {o : Option α} {p : α Bool} :
o.filter p = none a, a o ¬ p a := by
@[simp] theorem filter_eq_none {p : α Bool} :
o.filter p = none o = none a, a o ¬ p a := by
cases o <;> simp [filter_some]
@[simp] theorem filter_eq_some {o : Option α} {p : α Bool} :
@@ -294,10 +262,6 @@ theorem mem_filter_iff {p : α → Bool} {a : α} {o : Option α} :
a o.filter p a o p a := by
simp
theorem filter_eq_bind (x : Option α) (p : α Bool) :
x.filter p = x.bind (Option.guard (fun a => p a)) := by
cases x <;> rfl
@[simp] theorem all_guard (p : α Prop) [DecidablePred p] (a : α) :
Option.all q (guard p a) = (!p a || q a) := by
simp only [guard]
@@ -308,45 +272,6 @@ theorem filter_eq_bind (x : Option α) (p : α → Bool) :
simp only [guard]
split <;> simp_all
theorem all_eq_true (p : α Bool) (x : Option α) :
x.all p = true y, x = some y p y := by
cases x <;> simp
theorem all_eq_true_iff_get (p : α Bool) (x : Option α) :
x.all p = true (h : x.isSome) p (x.get h) := by
cases x <;> simp
theorem all_eq_false (p : α Bool) (x : Option α) :
x.all p = false y, x = some y p y = false := by
cases x <;> simp
theorem all_eq_false_iff_get (p : α Bool) (x : Option α) :
x.all p = false h : x.isSome, p (x.get h) = false := by
cases x <;> simp
theorem any_eq_true (p : α Bool) (x : Option α) :
x.any p = true y, x = some y p y := by
cases x <;> simp
theorem any_eq_true_iff_get (p : α Bool) (x : Option α) :
x.any p = true h : x.isSome, p (x.get h) := by
cases x <;> simp
theorem any_eq_false (p : α Bool) (x : Option α) :
x.any p = false y, x = some y p y = false := by
cases x <;> simp
theorem any_eq_false_iff_get (p : α Bool) (x : Option α) :
x.any p = false (h : x.isSome) p (x.get h) = false := by
cases x <;> simp
theorem isSome_of_any {x : Option α} {p : α Bool} (h : x.any p) : x.isSome := by
cases x <;> trivial
theorem any_map {α β : Type _} {x : Option α} {f : α β} {p : β Bool} :
(x.map f).any p = x.any (fun a => p (f a)) := by
cases x <;> rfl
theorem bind_map_comm {α β} {x : Option (Option α)} {f : α β} :
x.bind (Option.map f) = (x.map (Option.map f)).bind id := by cases x <;> simp
@@ -410,18 +335,6 @@ theorem guard_comp {p : α → Prop} [DecidablePred p] {f : β → α} :
ext1 b
simp [guard]
theorem bind_guard (x : Option α) (p : α Prop) {_ : DecidablePred p} :
x.bind (Option.guard p) = x.filter p := by
simp only [Option.filter_eq_bind, decide_eq_true_eq]
theorem guard_eq_map (p : α Prop) [DecidablePred p] :
Option.guard p = fun x => Option.map (fun _ => x) (if p x then some x else none) := by
funext x
simp [Option.guard]
theorem guard_def (p : α Prop) {_ : DecidablePred p} :
Option.guard p = fun x => if p x then some x else none := rfl
theorem liftOrGet_eq_or_eq {f : α α α} (h : a b, f a b = a f a b = b) :
o₁ o₂, liftOrGet f o₁ o₂ = o₁ liftOrGet f o₁ o₂ = o₂
| none, none => .inl rfl
@@ -588,104 +501,90 @@ end beq
/-! ### ite -/
section ite
@[simp] theorem dite_none_left_eq_some {p : Prop} {_ : Decidable p} {b : ¬p Option β} :
@[simp] theorem dite_none_left_eq_some {p : Prop} [Decidable p] {b : ¬p Option β} :
(if h : p then none else b h) = some a h, b h = some a := by
split <;> simp_all
@[simp] theorem dite_none_right_eq_some {p : Prop} {_ : Decidable p} {b : p Option α} :
@[simp] theorem dite_none_right_eq_some {p : Prop} [Decidable p] {b : p Option α} :
(if h : p then b h else none) = some a h, b h = some a := by
split <;> simp_all
@[simp] theorem some_eq_dite_none_left {p : Prop} {_ : Decidable p} {b : ¬p Option β} :
@[simp] theorem some_eq_dite_none_left {p : Prop} [Decidable p] {b : ¬p Option β} :
some a = (if h : p then none else b h) h, some a = b h := by
split <;> simp_all
@[simp] theorem some_eq_dite_none_right {p : Prop} {_ : Decidable p} {b : p Option α} :
@[simp] theorem some_eq_dite_none_right {p : Prop} [Decidable p] {b : p Option α} :
some a = (if h : p then b h else none) h, some a = b h := by
split <;> simp_all
@[simp] theorem ite_none_left_eq_some {p : Prop} {_ : Decidable p} {b : Option β} :
@[simp] theorem ite_none_left_eq_some {p : Prop} [Decidable p] {b : Option β} :
(if p then none else b) = some a ¬ p b = some a := by
split <;> simp_all
@[simp] theorem ite_none_right_eq_some {p : Prop} {_ : Decidable p} {b : Option α} :
@[simp] theorem ite_none_right_eq_some {p : Prop} [Decidable p] {b : Option α} :
(if p then b else none) = some a p b = some a := by
split <;> simp_all
@[simp] theorem some_eq_ite_none_left {p : Prop} {_ : Decidable p} {b : Option β} :
@[simp] theorem some_eq_ite_none_left {p : Prop} [Decidable p] {b : Option β} :
some a = (if p then none else b) ¬ p some a = b := by
split <;> simp_all
@[simp] theorem some_eq_ite_none_right {p : Prop} {_ : Decidable p} {b : Option α} :
@[simp] theorem some_eq_ite_none_right {p : Prop} [Decidable p] {b : Option α} :
some a = (if p then b else none) p some a = b := by
split <;> simp_all
theorem mem_dite_none_left {x : α} {_ : Decidable p} {l : ¬ p Option α} :
theorem mem_dite_none_left {x : α} [Decidable p] {l : ¬ p Option α} :
(x if h : p then none else l h) h : ¬ p, x l h := by
simp
theorem mem_dite_none_right {x : α} {_ : Decidable p} {l : p Option α} :
theorem mem_dite_none_right {x : α} [Decidable p] {l : p Option α} :
(x if h : p then l h else none) h : p, x l h := by
simp
theorem mem_ite_none_left {x : α} {_ : Decidable p} {l : Option α} :
theorem mem_ite_none_left {x : α} [Decidable p] {l : Option α} :
(x if p then none else l) ¬ p x l := by
simp
theorem mem_ite_none_right {x : α} {_ : Decidable p} {l : Option α} :
theorem mem_ite_none_right {x : α} [Decidable p] {l : Option α} :
(x if p then l else none) p x l := by
simp
@[simp] theorem isSome_dite {p : Prop} {_ : Decidable p} {b : p β} :
@[simp] theorem isSome_dite {p : Prop} [Decidable p] {b : p β} :
(if h : p then some (b h) else none).isSome = true p := by
split <;> simpa
@[simp] theorem isSome_ite {p : Prop} {_ : Decidable p} :
@[simp] theorem isSome_ite {p : Prop} [Decidable p] :
(if p then some b else none).isSome = true p := by
split <;> simpa
@[simp] theorem isSome_dite' {p : Prop} {_ : Decidable p} {b : ¬ p β} :
@[simp] theorem isSome_dite' {p : Prop} [Decidable p] {b : ¬ p β} :
(if h : p then none else some (b h)).isSome = true ¬ p := by
split <;> simpa
@[simp] theorem isSome_ite' {p : Prop} {_ : Decidable p} :
@[simp] theorem isSome_ite' {p : Prop} [Decidable p] :
(if p then none else some b).isSome = true ¬ p := by
split <;> simpa
@[simp] theorem get_dite {p : Prop} {_ : Decidable p} (b : p β) (w) :
@[simp] theorem get_dite {p : Prop} [Decidable p] (b : p β) (w) :
(if h : p then some (b h) else none).get w = b (by simpa using w) := by
split
· simp
· exfalso
simp at w
contradiction
@[simp] theorem get_ite {p : Prop} {_ : Decidable p} (h) :
@[simp] theorem get_ite {p : Prop} [Decidable p] (h) :
(if p then some b else none).get h = b := by
simpa using get_dite (p := p) (fun _ => b) (by simpa using h)
@[simp] theorem get_dite' {p : Prop} {_ : Decidable p} (b : ¬ p β) (w) :
@[simp] theorem get_dite' {p : Prop} [Decidable p] (b : ¬ p β) (w) :
(if h : p then none else some (b h)).get w = b (by simpa using w) := by
split
· exfalso
simp at w
contradiction
· simp
@[simp] theorem get_ite' {p : Prop} {_ : Decidable p} (h) :
@[simp] theorem get_ite' {p : Prop} [Decidable p] (h) :
(if p then none else some b).get h = b := by
simpa using get_dite' (p := p) (fun _ => b) (by simpa using h)
end ite
theorem isSome_filter {α : Type _} {x : Option α} {f : α Bool} :
(x.filter f).isSome = x.any f := by
cases x
· rfl
· rw [Bool.eq_iff_iff]
simp only [Option.any_some, Option.filter, Option.isSome_ite]
@[simp] theorem get_filter {α : Type _} {x : Option α} {f : α Bool} (h : (x.filter f).isSome) :
(x.filter f).get h = x.get (isSome_of_isSome_filter f x h) := by
cases x
· contradiction
· unfold Option.filter
simp only [Option.get_ite, Option.get_some]
/-! ### pbind -/
@[simp] theorem pbind_none : pbind none f = none := rfl
@@ -693,16 +592,7 @@ theorem isSome_filter {α : Type _} {x : Option α} {f : α → Bool} :
@[simp] theorem map_pbind {o : Option α} {f : (a : α) a o Option β} {g : β γ} :
(o.pbind f).map g = o.pbind (fun a h => (f a h).map g) := by
cases o <;> rfl
@[simp] theorem pbind_map {α β γ : Type _} (o : Option α)
(f : α β) (g : (x : β) o.map f = some x Option γ) :
(o.map f).pbind g = o.pbind (fun x h => g (f x) (h rfl)) := by
cases o <;> rfl
@[simp] theorem pbind_eq_bind {α β : Type _} (o : Option α)
(f : α Option β) : o.pbind (fun x _ => f x) = o.bind f := by
cases o <;> rfl
cases o <;> simp
@[congr] theorem pbind_congr {o o' : Option α} (ho : o = o')
{f : (a : α) a o Option β} {g : (a : α) a o' Option β}
@@ -712,20 +602,39 @@ theorem isSome_filter {α : Type _} {x : Option α} {f : α → Bool} :
theorem pbind_eq_none_iff {o : Option α} {f : (a : α) a o Option β} :
o.pbind f = none o = none a h, f a h = none := by
cases o <;> simp
cases o with
| none => simp
| some a =>
simp only [pbind_some, reduceCtorEq, mem_def, some.injEq, false_or]
constructor
· intro h
exact a, rfl, h
· rintro a, rfl, h
exact h
theorem isSome_pbind_iff {o : Option α} {f : (a : α) a o Option β} :
(o.pbind f).isSome a h, (f a h).isSome := by
cases o <;> simp
@[deprecated "isSome_pbind_iff" (since := "2025-04-01")]
theorem pbind_isSome {o : Option α} {f : (a : α) a o Option β} :
(o.pbind f).isSome = a h, (f a h).isSome := by
exact propext isSome_pbind_iff
cases o with
| none => simp
| some a =>
simp only [pbind_some, mem_def, some.injEq, eq_iff_iff]
constructor
· intro h
exact a, rfl, h
· rintro a, rfl, h
exact h
theorem pbind_eq_some_iff {o : Option α} {f : (a : α) a o Option β} {b : β} :
o.pbind f = some b a h, f a h = some b := by
cases o <;> simp
cases o with
| none => simp
| some a =>
simp only [pbind_some, mem_def, some.injEq]
constructor
· intro h
exact a, rfl, h
· rintro a, rfl, h
exact h
/-! ### pmap -/
@@ -739,12 +648,10 @@ theorem pbind_eq_some_iff {o : Option α} {f : (a : α) → a ∈ o → Option
pmap f o h = none o = none := by
cases o <;> simp
@[simp] theorem isSome_pmap {p : α Prop} {f : (a : α), p a β} {o : Option α} {h} :
@[simp] theorem pmap_isSome {p : α Prop} {f : (a : α), p a β} {o : Option α} {h} :
(pmap f o h).isSome = o.isSome := by
cases o <;> simp
@[deprecated isSome_pmap (since := "2025-04-01")] abbrev pmap_isSome := @isSome_pmap
@[simp] theorem pmap_eq_some_iff {p : α Prop} {f : (a : α), p a β} {o : Option α} {h} :
pmap f o h = some b (a : α) (h : p a), o = some a b = f a h := by
cases o with
@@ -770,28 +677,6 @@ theorem pmap_map (o : Option α) (f : α → β) {p : β → Prop} (g : ∀ b, p
pmap (fun a h => g (f a) h) o (fun a m => H (f a) (mem_map_of_mem f m)) := by
cases o <;> simp
theorem pmap_pred_congr {α : Type u}
{p p' : α Prop} (hp : x, p x p' x)
{o o' : Option α} (ho : o = o')
(h : x, x o p x) : x, x o' p' x := by
intro y hy
cases ho
exact (hp y).mp (h y hy)
@[congr]
theorem pmap_congr {α : Type u} {β : Type v}
{p p' : α Prop} (hp : x, p x p' x)
{f : (x : α) p x β} {f' : (x : α) p' x β}
(hf : x h, f x ((hp x).mpr h) = f' x h)
{o o' : Option α} (ho : o = o')
{h : x, x o p x} :
Option.pmap f o h = Option.pmap f' o' (Option.pmap_pred_congr hp ho h) := by
cases ho
cases o
· rfl
· dsimp
rw [hf]
/-! ### pelim -/
@[simp] theorem pelim_none : pelim none b f = b := rfl
@@ -806,69 +691,6 @@ theorem pmap_congr {α : Type u} {β : Type v}
o.pelim g (fun a h => g' (f a (H a h))) := by
cases o <;> simp
/-! ### pfilter -/
@[congr]
theorem pfilter_congr {α : Type u} {o o' : Option α} (ho : o = o')
{f : (a : α) o = some a Bool} {g : (a : α) o' = some a Bool}
(hf : a ha, f a (ho.trans ha) = g a ha) :
o.pfilter f = o'.pfilter g := by
cases ho
congr; funext a ha
exact hf a ha
@[simp] theorem pfilter_none {α : Type _} {p : (a : α) none = some a Bool} :
none.pfilter p = none := by
rfl
@[simp] theorem pfilter_some {α : Type _} {x : α} {p : (a : α) some x = some a Bool} :
(some x).pfilter p = if p x rfl then some x else none := by
simp only [pfilter, cond_eq_if]
theorem isSome_pfilter_iff {α : Type _} {o : Option α} {p : (a : α) o = some a Bool} :
(o.pfilter p).isSome (a : α) (ha : o = some a), p a ha := by
cases o <;> simp
theorem isSome_pfilter_iff_get {α : Type _} {o : Option α} {p : (a : α) o = some a Bool} :
(o.pfilter p).isSome (h : o.isSome), p (o.get h) (get_mem h) := by
cases o <;> simp
theorem isSome_of_isSome_pfilter {α : Type _} {o : Option α} {p : (a : α) o = some a Bool}
(h : (o.pfilter p).isSome) : o.isSome :=
(isSome_pfilter_iff_get.mp h).1
@[simp] theorem get_pfilter {α : Type _} {o : Option α} {p : (a : α) o = some a Bool}
(h : (o.pfilter p).isSome) :
(o.pfilter p).get h = o.get (isSome_of_isSome_pfilter h) := by
cases o <;> simp
theorem pfilter_eq_none_iff {α : Type _} {o : Option α} {p : (a : α) o = some a Bool} :
o.pfilter p = none o = none (a : α) (ha : o = some a), p a ha = false := by
cases o <;> simp
theorem pfilter_eq_some_iff {α : Type _} {o : Option α} {p : (a : α) o = some a Bool}
{a : α} : o.pfilter p = some a ha, p a ha = true := by
simp only [eq_some_iff_get_eq, get_pfilter, isSome_pfilter_iff]
constructor
· rintro b, hb, rfl, hb', rfl
exact hb, rfl, hb'
· rintro h, rfl, h'
exact o.get h, h, rfl, h', rfl
@[simp] theorem pfilter_eq_filter {α : Type _} {o : Option α} {p : α Bool} :
o.pfilter (fun a _ => p a) = o.filter p := by
cases o with
| none => rfl
| some a =>
simp only [pfilter, Option.filter, Bool.cond_eq_ite_iff]
theorem pfilter_eq_pbind_ite {α : Type _} {o : Option α}
{p : (a : α) o = some a Bool} :
o.pfilter p = o.pbind (fun a h => if p a h then some a else none) := by
cases o
· rfl
· simp only [Option.pfilter, Bool.cond_eq_ite, Option.pbind_some]
/-! ### LT and LE -/
@[simp] theorem not_lt_none [LT α] {a : Option α} : ¬ a < none := by cases a <;> simp [LT.lt, Option.lt]

View File

@@ -7,8 +7,6 @@ Authors: Dany Fabian, Sebastian Ullrich
prelude
import Init.Data.String
import Init.Data.Array.Basic
import Init.Data.SInt.Basic
import Init.Data.Vector
/--
The result of a comparison according to a total order.
@@ -306,27 +304,6 @@ theorem then_eq_eq {o₁ o₂ : Ordering} : o₁.then o₂ = eq ↔ o₁ = eq
theorem then_eq_gt {o₁ o₂ : Ordering} : o₁.then o₂ = gt o₁ = gt o₁ = eq o₂ = gt := by
cases o₁ <;> cases o₂ <;> decide
@[simp]
theorem lt_then {o : Ordering} : lt.then o = lt := rfl
@[simp]
theorem gt_then {o : Ordering} : gt.then o = gt := rfl
@[simp]
theorem eq_then {o : Ordering} : eq.then o = o := rfl
theorem isLE_then_iff_or {o₁ o₂ : Ordering} : (o₁.then o₂).isLE o₁ = lt (o₁ = eq o₂.isLE) := by
cases o₁ <;> simp
theorem isLE_then_iff_and {o₁ o₂ : Ordering} : (o₁.then o₂).isLE o₁.isLE (o₁ = lt o₂.isLE) := by
cases o₁ <;> simp
theorem isLE_left_of_isLE_then {o₁ o₂ : Ordering} (h : (o₁.then o₂).isLE) : o₁.isLE := by
cases o₁ <;> simp_all
theorem isGE_left_of_isGE_then {o₁ o₂ : Ordering} (h : (o₁.then o₂).isGE) : o₁.isGE := by
cases o₁ <;> simp_all
end Lemmas
end Ordering
@@ -368,104 +345,6 @@ To lexicographically combine two `Ordering`s, use `Ordering.then`.
@[inline] def compareLex (cmp₁ cmp₂ : α β Ordering) (a : α) (b : β) : Ordering :=
(cmp₁ a b).then (cmp₂ a b)
section Lemmas
@[simp]
theorem compareLex_eq_eq {α} {cmp₁ cmp₂} {a b : α} :
compareLex cmp₁ cmp₂ a b = .eq cmp₁ a b = .eq cmp₂ a b = .eq := by
simp [compareLex, Ordering.then_eq_eq]
theorem compareOfLessAndEq_eq_swap_of_lt_iff_not_gt_and_ne {α : Type u} [LT α] [DecidableLT α] [DecidableEq α]
(h : x y : α, x < y ¬ y < x x y) {x y : α} :
compareOfLessAndEq x y = (compareOfLessAndEq y x).swap := by
simp only [compareOfLessAndEq]
split
· rename_i h'
rw [h] at h'
simp only [h'.1, h'.2.symm, reduceIte, Ordering.swap_gt]
· split
· rename_i h'
have : ¬ y < y := Not.imp (·.2 rfl) <| (h y y).mp
simp only [h', this, reduceIte, Ordering.swap_eq]
· rename_i h' h''
replace h' := (h y x).mpr h', Ne.symm h''
simp only [h', Ne.symm h'', reduceIte, Ordering.swap_lt]
theorem lt_iff_not_gt_and_ne_of_antisymm_of_total_of_not_le
{α : Type u} [LT α] [LE α] [DecidableLT α] [DecidableEq α]
(antisymm : {x y : α}, x y y x x = y)
(total : (x y : α), x y y x) (not_le : {x y : α}, ¬ x y y < x) (x y : α) :
x < y ¬ y < x x y := by
simp only [ not_le, Classical.not_not]
constructor
· intro h
have refl := by cases total y y <;> assumption
exact (total _ _).resolve_left h, fun h' => (h' h) refl
· intro h₁, h₂ h₃
exact h₂ (antisymm h₁ h₃)
theorem compareOfLessAndEq_eq_swap
{α : Type u} [LT α] [LE α] [DecidableLT α] [DecidableEq α]
(antisymm : {x y : α}, x y y x x = y)
(total : (x y : α), x y y x) (not_le : {x y : α}, ¬ x y y < x) {x y : α} :
compareOfLessAndEq x y = (compareOfLessAndEq y x).swap := by
apply compareOfLessAndEq_eq_swap_of_lt_iff_not_gt_and_ne
exact lt_iff_not_gt_and_ne_of_antisymm_of_total_of_not_le antisymm total not_le
@[simp]
theorem compareOfLessAndEq_eq_lt
{α : Type u} [LT α] [LE α] [DecidableLT α] [DecidableEq α] {x y : α} :
compareOfLessAndEq x y = .lt x < y := by
rw [compareOfLessAndEq]
repeat' split <;> simp_all
theorem compareOfLessAndEq_eq_eq
{α : Type u} [LT α] [LE α] [DecidableLT α] [DecidableLE α] [DecidableEq α]
(refl : (x : α), x x) (not_le : {x y : α}, ¬ x y y < x) {x y : α} :
compareOfLessAndEq x y = .eq x = y := by
rw [compareOfLessAndEq]
repeat' split <;> try (simp_all; done)
simp only [reduceCtorEq, false_iff]
rintro rfl
rename_i hlt
simp [ not_le] at hlt
exact hlt (refl x)
theorem compareOfLessAndEq_eq_gt_of_lt_iff_not_gt_and_ne
{α : Type u} [LT α] [LE α] [DecidableLT α] [DecidableEq α] {x y : α}
(h : x y : α, x < y ¬ y < x x y) :
compareOfLessAndEq x y = .gt y < x := by
rw [compareOfLessAndEq_eq_swap_of_lt_iff_not_gt_and_ne h, Ordering.swap_eq_gt]
exact compareOfLessAndEq_eq_lt
theorem compareOfLessAndEq_eq_gt
{α : Type u} [LT α] [LE α] [DecidableLT α] [DecidableEq α]
(antisymm : {x y : α}, x y y x x = y)
(total : (x y : α), x y y x) (not_le : {x y : α}, ¬ x y y < x) (x y : α) :
compareOfLessAndEq x y = .gt y < x := by
apply compareOfLessAndEq_eq_gt_of_lt_iff_not_gt_and_ne
exact lt_iff_not_gt_and_ne_of_antisymm_of_total_of_not_le antisymm total not_le
theorem isLE_compareOfLessAndEq
{α : Type u} [LT α] [LE α] [DecidableLT α] [DecidableLE α] [DecidableEq α]
(antisymm : {x y : α}, x y y x x = y)
(not_le : {x y : α}, ¬ x y y < x) (total : (x y : α), x y y x) {x y : α} :
(compareOfLessAndEq x y).isLE x y := by
have refl (a : α) := by cases total a a <;> assumption
rw [Ordering.isLE_iff_eq_lt_or_eq_eq, compareOfLessAndEq_eq_lt,
compareOfLessAndEq_eq_eq refl not_le]
constructor
· rintro (h | rfl)
· rw [ not_le] at h
exact total _ _ |>.resolve_left h
· exact refl x
· intro hle
by_cases hge : x y
· exact Or.inr <| antisymm hle hge
· exact Or.inl <| not_le.mp hge
end Lemmas
/--
`Ord α` provides a computable total order on `α`, in terms of the
`compare : αα → Ordering` function.
@@ -532,24 +411,6 @@ instance : Ord USize where
instance : Ord Char where
compare x y := compareOfLessAndEq x y
instance : Ord Int8 where
compare x y := compareOfLessAndEq x y
instance : Ord Int16 where
compare x y := compareOfLessAndEq x y
instance : Ord Int32 where
compare x y := compareOfLessAndEq x y
instance : Ord Int64 where
compare x y := compareOfLessAndEq x y
instance : Ord ISize where
compare x y := compareOfLessAndEq x y
instance {n} : Ord (BitVec n) where
compare x y := compareOfLessAndEq x y
instance [Ord α] : Ord (Option α) where
compare
| none, none => .eq
@@ -557,207 +418,6 @@ instance [Ord α] : Ord (Option α) where
| some _, none => .gt
| some x, some y => compare x y
instance : Ord Ordering where
compare := compareOn (·.toCtorIdx)
namespace List
@[specialize]
protected def compareLex {α} (cmp : α α Ordering) :
List α List α Ordering
| [], [] => .eq
| [], _ => .lt
| _, [] => .gt
| x :: xs, y :: ys => match cmp x y with
| .lt => .lt
| .eq => xs.compareLex cmp ys
| .gt => .gt
instance {α} [Ord α] : Ord (List α) where
compare := List.compareLex compare
protected theorem compare_eq_compareLex {α} [Ord α] :
compare (α := List α) = List.compareLex compare := rfl
protected theorem compareLex_cons_cons {α} {cmp} {x y : α} {xs ys : List α} :
(x :: xs).compareLex cmp (y :: ys) = (cmp x y).then (xs.compareLex cmp ys) := by
rw [List.compareLex]
split <;> simp_all
@[simp]
protected theorem compare_cons_cons {α} [Ord α] {x y : α} {xs ys : List α} :
compare (x :: xs) (y :: ys) = (compare x y).then (compare xs ys) :=
List.compareLex_cons_cons
protected theorem compareLex_nil_cons {α} {cmp} {x : α} {xs : List α} :
[].compareLex cmp (x :: xs) = .lt :=
rfl
@[simp]
protected theorem compare_nil_cons {α} [Ord α] {x : α} {xs : List α} :
compare [] (x :: xs) = .lt :=
rfl
protected theorem compareLex_cons_nil {α} {cmp} {x : α} {xs : List α} :
(x :: xs).compareLex cmp [] = .gt :=
rfl
@[simp]
protected theorem compare_cons_nil {α} [Ord α] {x : α} {xs : List α} :
compare (x :: xs) [] = .gt :=
rfl
protected theorem compareLex_nil_nil {α} {cmp} :
[].compareLex (α := α) cmp [] = .eq :=
rfl
@[simp]
protected theorem compare_nil_nil {α} [Ord α] :
compare (α := List α) [] [] = .eq :=
rfl
protected theorem isLE_compareLex_nil_left {α} {cmp} {xs : List α} :
(List.compareLex (cmp := cmp) [] xs).isLE := by
cases xs <;> simp [List.compareLex_nil_nil, List.compareLex_nil_cons]
protected theorem isLE_compare_nil_left {α} [Ord α] {xs : List α} :
(compare [] xs).isLE :=
List.isLE_compareLex_nil_left
protected theorem isLE_compareLex_nil_right {α} {cmp} {xs : List α} :
(List.compareLex (cmp := cmp) xs []).isLE xs = [] := by
cases xs <;> simp [List.compareLex_nil_nil, List.compareLex_cons_nil]
@[simp]
protected theorem isLE_compare_nil_right {α} [Ord α] {xs : List α} :
(compare xs []).isLE xs = [] :=
List.isLE_compareLex_nil_right
protected theorem isGE_compareLex_nil_left {α} {cmp} {xs : List α} :
(List.compareLex (cmp := cmp) [] xs).isGE xs = [] := by
cases xs <;> simp [List.compareLex_nil_nil, List.compareLex_nil_cons]
@[simp]
protected theorem isGE_compare_nil_left {α} [Ord α] {xs : List α} :
(compare [] xs).isGE xs = [] :=
List.isGE_compareLex_nil_left
protected theorem isGE_compareLex_nil_right {α} {cmp} {xs : List α} :
(List.compareLex (cmp := cmp) xs []).isGE := by
cases xs <;> simp [List.compareLex_nil_nil, List.compareLex_cons_nil]
protected theorem isGE_compare_nil_right {α} [Ord α] {xs : List α} :
(compare xs []).isGE :=
List.isGE_compareLex_nil_right
protected theorem compareLex_nil_left_eq_eq {α} {cmp} {xs : List α} :
List.compareLex cmp [] xs = .eq xs = [] := by
cases xs <;> simp [List.compareLex_nil_nil, List.compareLex_nil_cons]
@[simp]
protected theorem compare_nil_left_eq_eq {α} [Ord α] {xs : List α} :
compare [] xs = .eq xs = [] :=
List.compareLex_nil_left_eq_eq
protected theorem compareLex_nil_right_eq_eq {α} {cmp} {xs : List α} :
xs.compareLex cmp [] = .eq xs = [] := by
cases xs <;> simp [List.compareLex_nil_nil, List.compareLex_cons_nil]
@[simp]
protected theorem compare_nil_right_eq_eq {α} [Ord α] {xs : List α} :
compare xs [] = .eq xs = [] :=
List.compareLex_nil_right_eq_eq
end List
namespace Array
/-- Compares two arrays lexicographically, using `cmp` to compare elements.

Scans from index `0` upward and returns the result of `cmp` at the first index
where the elements differ. If no elements differ, the shorter array is smaller:
a strict prefix yields `.lt` (resp. `.gt`), and arrays of equal size with no
differing element are `.eq`. -/
@[specialize]
protected def compareLex {α} (cmp : α → α → Ordering) (a₁ a₂ : Array α) : Ordering :=
  go 0
where go i :=
  -- `h₁`/`h₂` provide the bounds proofs needed for `a₁[i]` and `a₂[i]` below.
  if h₁ : a₁.size <= i then
    if a₂.size <= i then .eq else .lt
  else
    if h₂ : a₂.size <= i then
      .gt
    else match cmp a₁[i] a₂[i] with
      | .lt => .lt
      | .eq => go (i + 1)
      | .gt => .gt
termination_by a₁.size - i
/-- Arrays are ordered lexicographically, element by element. -/
instance {α} [Ord α] : Ord (Array α) where
  compare := Array.compareLex compare
/-- `compare` on arrays unfolds to `Array.compareLex` of the element-wise
`compare`. Holds by definition of the `Ord (Array α)` instance. -/
protected theorem compare_eq_compareLex {α} [Ord α] :
    compare (α := Array α) = Array.compareLex compare := rfl
/-- Shift lemma for the `Array.compareLex.go` loop: running `go` on cons-arrays
starting at index `i + 1` agrees with running it on the tail arrays starting at
index `i`. This is the key step relating `List.compareLex` (which recurses on
cons cells) to `Array.compareLex` (which advances an index). -/
private theorem compareLex.go_succ {α} {cmp} {x₁ x₂} {a₁ a₂ : List α} {i} :
    compareLex.go cmp (x₁ :: a₁).toArray (x₂ :: a₂).toArray (i + 1) =
      compareLex.go cmp a₁.toArray a₂.toArray i := by
  -- Induct along the recursion structure of `go` on the tail arrays.
  induction i using Array.compareLex.go.induct cmp a₁.toArray a₂.toArray
  all_goals try
    -- Unfold one step of `go` on both sides of the equation, then simplify.
    conv => congr <;> rw [compareLex.go]
    simp
  repeat' split <;> (try simp_all; done)
/-- `List.compareLex` agrees with `Array.compareLex` on the corresponding
arrays: converting both lists via `toArray` does not change the comparison. -/
protected theorem _root_.List.compareLex_eq_compareLex_toArray {α} {cmp} {l₁ l₂ : List α} :
    List.compareLex cmp l₁ l₂ = Array.compareLex cmp l₁.toArray l₂.toArray := by
  simp only [Array.compareLex]
  -- Structural induction on the first list, keeping the second list general.
  induction l₁ generalizing l₂ with
  | nil =>
    cases l₂
    · simp [Array.compareLex.go, List.compareLex_nil_nil]
    · simp [Array.compareLex.go, List.compareLex_nil_cons]
  | cons x xs ih =>
    cases l₂
    · simp [Array.compareLex.go, List.compareLex_cons_nil]
    · -- Unfold one step on each side, compare heads, then use `go_succ`
      -- to reduce the tail comparison to the induction hypothesis.
      rw [Array.compareLex.go, List.compareLex_cons_cons]
      simp only [List.size_toArray, List.length_cons, Nat.le_zero_eq, Nat.add_one_ne_zero,
        reduceDIte, List.getElem_toArray, List.getElem_cons_zero, Nat.zero_add]
      split <;> simp_all [compareLex.go_succ]
/-- `compare` on lists agrees with `compare` on the corresponding arrays. -/
protected theorem _root_.List.compare_eq_compare_toArray {α} [Ord α] {l₁ l₂ : List α} :
    compare l₁ l₂ = compare l₁.toArray l₂.toArray :=
  List.compareLex_eq_compareLex_toArray
/-- `Array.compareLex` agrees with `List.compareLex` on the underlying lists;
the converse direction of `List.compareLex_eq_compareLex_toArray`. -/
protected theorem compareLex_eq_compareLex_toList {α} {cmp} {a₁ a₂ : Array α} :
    Array.compareLex cmp a₁ a₂ = List.compareLex cmp a₁.toList a₂.toList := by
  rw [List.compareLex_eq_compareLex_toArray]
/-- `compare` on arrays agrees with `compare` on the underlying lists. -/
protected theorem compare_eq_compare_toList {α} [Ord α] {a₁ a₂ : Array α} :
    compare a₁ a₂ = compare a₁.toList a₂.toList :=
  Array.compareLex_eq_compareLex_toList
end Array
namespace Vector
/-- Compares two vectors of the same length lexicographically, using `cmp` to
compare elements; delegates to `Array.compareLex` on the underlying arrays. -/
protected def compareLex {α n} (cmp : α → α → Ordering) (a b : Vector α n) : Ordering :=
  Array.compareLex cmp a.toArray b.toArray
/-- Vectors are ordered lexicographically, element by element. -/
instance {α n} [Ord α] : Ord (Vector α n) where
  compare := Vector.compareLex compare
/-- `Vector.compareLex` agrees with `Array.compareLex` on the underlying
arrays. Holds by definition of `Vector.compareLex`. -/
protected theorem compareLex_eq_compareLex_toArray {α n cmp} {a b : Vector α n} :
    Vector.compareLex cmp a b = Array.compareLex cmp a.toArray b.toArray :=
  rfl
/-- `Vector.compareLex` agrees with `List.compareLex` on the underlying
lists. -/
protected theorem compareLex_eq_compareLex_toList {α n cmp} {a b : Vector α n} :
    Vector.compareLex cmp a b = List.compareLex cmp a.toList b.toList :=
  Array.compareLex_eq_compareLex_toList
/-- `compare` on vectors agrees with `compare` on the underlying arrays. Holds
by definition of the `Ord (Vector α n)` instance. -/
protected theorem compare_eq_compare_toArray {α n} [Ord α] {a b : Vector α n} :
    compare a b = compare a.toArray b.toArray :=
  rfl
/-- `compare` on vectors agrees with `compare` on the underlying lists. -/
protected theorem compare_eq_compare_toList {α n} [Ord α] {a b : Vector α n} :
    compare a b = compare a.toList b.toList :=
  Array.compare_eq_compare_toList
end Vector
/-- The lexicographic order on pairs: compares first components with the
`Ord α` instance and, on a tie, breaks it with the second components via
`Ord β`. -/
def lexOrd [Ord α] [Ord β] : Ord (α × β) where
  compare := compareLex (compareOn (·.1)) (compareOn (·.2))

View File

@@ -204,7 +204,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_add"]
protected def Int8.add (a b : Int8) : Int8 := a.toBitVec + b.toBitVec
def Int8.add (a b : Int8) : Int8 := a.toBitVec + b.toBitVec
/--
Subtracts one 8-bit signed integer from another, wrapping around on over- or underflow. Usually
accessed via the `-` operator.
@@ -212,7 +212,7 @@ accessed via the `-` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_sub"]
protected def Int8.sub (a b : Int8) : Int8 := a.toBitVec - b.toBitVec
def Int8.sub (a b : Int8) : Int8 := a.toBitVec - b.toBitVec
/--
Multiplies two 8-bit signed integers, wrapping around on over- or underflow. Usually accessed via
the `*` operator.
@@ -220,7 +220,7 @@ the `*` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_mul"]
protected def Int8.mul (a b : Int8) : Int8 := a.toBitVec * b.toBitVec
def Int8.mul (a b : Int8) : Int8 := a.toBitVec * b.toBitVec
/--
Truncating division for 8-bit signed integers, rounding towards zero. Usually accessed via the `/`
operator.
@@ -237,7 +237,7 @@ Examples:
* `Int8.div 10 0 = 0`
-/
@[extern "lean_int8_div"]
protected def Int8.div (a b : Int8) : Int8 := BitVec.sdiv a.toBitVec b.toBitVec
def Int8.div (a b : Int8) : Int8 := BitVec.sdiv a.toBitVec b.toBitVec
/--
The modulo operator for 8-bit signed integers, which computes the remainder when dividing one
integer by another with the T-rounding convention used by `Int8.div`. Usually accessed via the `%`
@@ -258,7 +258,7 @@ Examples:
* `Int8.mod (-4) 0 = (-4)`
-/
@[extern "lean_int8_mod"]
protected def Int8.mod (a b : Int8) : Int8 := BitVec.srem a.toBitVec b.toBitVec
def Int8.mod (a b : Int8) : Int8 := BitVec.srem a.toBitVec b.toBitVec
/--
Bitwise and for 8-bit signed integers. Usually accessed via the `&&&` operator.
@@ -268,7 +268,7 @@ according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_land"]
protected def Int8.land (a b : Int8) : Int8 := a.toBitVec &&& b.toBitVec
def Int8.land (a b : Int8) : Int8 := a.toBitVec &&& b.toBitVec
/--
Bitwise or for 8-bit signed integers. Usually accessed via the `|||` operator.
@@ -278,7 +278,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_lor"]
protected def Int8.lor (a b : Int8) : Int8 := a.toBitVec ||| b.toBitVec
def Int8.lor (a b : Int8) : Int8 := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for 8-bit signed integers. Usually accessed via the `^^^` operator.
@@ -288,7 +288,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_xor"]
protected def Int8.xor (a b : Int8) : Int8 := a.toBitVec ^^^ b.toBitVec
def Int8.xor (a b : Int8) : Int8 := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for 8-bit signed integers. Usually accessed via the `<<<` operator.
@@ -297,7 +297,7 @@ Signed integers are interpreted as bitvectors according to the two's complement
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_shift_left"]
protected def Int8.shiftLeft (a b : Int8) : Int8 := a.toBitVec <<< (b.toBitVec.smod 8)
def Int8.shiftLeft (a b : Int8) : Int8 := a.toBitVec <<< (b.toBitVec.smod 8)
/--
Arithmetic right shift for 8-bit signed integers. Usually accessed via the `<<<` operator.
@@ -306,7 +306,7 @@ The high bits are filled with the value of the most significant bit.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_shift_right"]
protected def Int8.shiftRight (a b : Int8) : Int8 := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod 8)
def Int8.shiftRight (a b : Int8) : Int8 := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod 8)
/--
Bitwise complement, also known as bitwise negation, for 8-bit signed integers. Usually accessed via
the `~~~` prefix operator.
@@ -317,7 +317,7 @@ Integers use the two's complement representation, so `Int8.complement a = -(a +
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_complement"]
protected def Int8.complement (a : Int8) : Int8 := ~~~a.toBitVec
def Int8.complement (a : Int8) : Int8 := ~~~a.toBitVec
/--
Computes the absolute value of an 8-bit signed integer.
@@ -327,7 +327,7 @@ mapped to `Int8.minValue`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int8_abs"]
protected def Int8.abs (a : Int8) : Int8 := a.toBitVec.abs
def Int8.abs (a : Int8) : Int8 := a.toBitVec.abs
/--
Decides whether two 8-bit signed integers are equal. Usually accessed via the `DecidableEq Int8`
@@ -353,12 +353,12 @@ def Int8.decEq (a b : Int8) : Decidable (a = b) :=
Strict inequality of 8-bit signed integers, defined as inequality of the corresponding integers.
Usually accessed via the `<` operator.
-/
protected def Int8.lt (a b : Int8) : Prop := a.toBitVec.slt b.toBitVec
def Int8.lt (a b : Int8) : Prop := a.toBitVec.slt b.toBitVec
/--
Non-strict inequality of 8-bit signed integers, defined as inequality of the corresponding integers.
Usually accessed via the `≤` operator.
-/
protected def Int8.le (a b : Int8) : Prop := a.toBitVec.sle b.toBitVec
def Int8.le (a b : Int8) : Prop := a.toBitVec.sle b.toBitVec
instance : Inhabited Int8 where
default := 0
@@ -563,7 +563,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_add"]
protected def Int16.add (a b : Int16) : Int16 := a.toBitVec + b.toBitVec
def Int16.add (a b : Int16) : Int16 := a.toBitVec + b.toBitVec
/--
Subtracts one 16-bit signed integer from another, wrapping around on over- or underflow. Usually
accessed via the `-` operator.
@@ -571,7 +571,7 @@ accessed via the `-` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_sub"]
protected def Int16.sub (a b : Int16) : Int16 := a.toBitVec - b.toBitVec
def Int16.sub (a b : Int16) : Int16 := a.toBitVec - b.toBitVec
/--
Multiplies two 16-bit signed integers, wrapping around on over- or underflow. Usually accessed via
the `*` operator.
@@ -579,7 +579,7 @@ the `*` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_mul"]
protected def Int16.mul (a b : Int16) : Int16 := a.toBitVec * b.toBitVec
def Int16.mul (a b : Int16) : Int16 := a.toBitVec * b.toBitVec
/--
Truncating division for 16-bit signed integers, rounding towards zero. Usually accessed via the `/`
operator.
@@ -596,7 +596,7 @@ Examples:
* `Int16.div 10 0 = 0`
-/
@[extern "lean_int16_div"]
protected def Int16.div (a b : Int16) : Int16 := BitVec.sdiv a.toBitVec b.toBitVec
def Int16.div (a b : Int16) : Int16 := BitVec.sdiv a.toBitVec b.toBitVec
/--
The modulo operator for 16-bit signed integers, which computes the remainder when dividing one
integer by another with the T-rounding convention used by `Int16.div`. Usually accessed via the `%`
@@ -617,7 +617,7 @@ Examples:
* `Int16.mod (-4) 0 = (-4)`
-/
@[extern "lean_int16_mod"]
protected def Int16.mod (a b : Int16) : Int16 := BitVec.srem a.toBitVec b.toBitVec
def Int16.mod (a b : Int16) : Int16 := BitVec.srem a.toBitVec b.toBitVec
/--
Bitwise and for 16-bit signed integers. Usually accessed via the `&&&` operator.
@@ -627,7 +627,7 @@ according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_land"]
protected def Int16.land (a b : Int16) : Int16 := a.toBitVec &&& b.toBitVec
def Int16.land (a b : Int16) : Int16 := a.toBitVec &&& b.toBitVec
/--
Bitwise or for 16-bit signed integers. Usually accessed via the `|||` operator.
@@ -637,7 +637,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_lor"]
protected def Int16.lor (a b : Int16) : Int16 := a.toBitVec ||| b.toBitVec
def Int16.lor (a b : Int16) : Int16 := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for 16-bit signed integers. Usually accessed via the `^^^` operator.
@@ -647,7 +647,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_xor"]
protected def Int16.xor (a b : Int16) : Int16 := a.toBitVec ^^^ b.toBitVec
def Int16.xor (a b : Int16) : Int16 := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for 16-bit signed integers. Usually accessed via the `<<<` operator.
@@ -656,7 +656,7 @@ Signed integers are interpreted as bitvectors according to the two's complement
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_shift_left"]
protected def Int16.shiftLeft (a b : Int16) : Int16 := a.toBitVec <<< (b.toBitVec.smod 16)
def Int16.shiftLeft (a b : Int16) : Int16 := a.toBitVec <<< (b.toBitVec.smod 16)
/--
Arithmetic right shift for 16-bit signed integers. Usually accessed via the `<<<` operator.
@@ -665,7 +665,7 @@ The high bits are filled with the value of the most significant bit.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_shift_right"]
protected def Int16.shiftRight (a b : Int16) : Int16 := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod 16)
def Int16.shiftRight (a b : Int16) : Int16 := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod 16)
/--
Bitwise complement, also known as bitwise negation, for 16-bit signed integers. Usually accessed via
the `~~~` prefix operator.
@@ -676,7 +676,7 @@ Integers use the two's complement representation, so `Int16.complement a = -(a +
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_complement"]
protected def Int16.complement (a : Int16) : Int16 := ~~~a.toBitVec
def Int16.complement (a : Int16) : Int16 := ~~~a.toBitVec
/--
Computes the absolute value of a 16-bit signed integer.
@@ -686,7 +686,7 @@ mapped to `Int16.minValue`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int16_abs"]
protected def Int16.abs (a : Int16) : Int16 := a.toBitVec.abs
def Int16.abs (a : Int16) : Int16 := a.toBitVec.abs
/--
Decides whether two 16-bit signed integers are equal. Usually accessed via the `DecidableEq Int16`
@@ -712,12 +712,12 @@ def Int16.decEq (a b : Int16) : Decidable (a = b) :=
Strict inequality of 16-bit signed integers, defined as inequality of the corresponding integers.
Usually accessed via the `<` operator.
-/
protected def Int16.lt (a b : Int16) : Prop := a.toBitVec.slt b.toBitVec
def Int16.lt (a b : Int16) : Prop := a.toBitVec.slt b.toBitVec
/--
Non-strict inequality of 16-bit signed integers, defined as inequality of the corresponding
integers. Usually accessed via the `≤` operator.
-/
protected def Int16.le (a b : Int16) : Prop := a.toBitVec.sle b.toBitVec
def Int16.le (a b : Int16) : Prop := a.toBitVec.sle b.toBitVec
instance : Inhabited Int16 where
default := 0
@@ -938,7 +938,7 @@ Adds two 32-bit signed integers, wrapping around on over- or underflow. Usually
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_add"]
protected def Int32.add (a b : Int32) : Int32 := a.toBitVec + b.toBitVec
def Int32.add (a b : Int32) : Int32 := a.toBitVec + b.toBitVec
/--
Subtracts one 32-bit signed integer from another, wrapping around on over- or underflow. Usually
accessed via the `-` operator.
@@ -946,7 +946,7 @@ accessed via the `-` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_sub"]
protected def Int32.sub (a b : Int32) : Int32 := a.toBitVec - b.toBitVec
def Int32.sub (a b : Int32) : Int32 := a.toBitVec - b.toBitVec
/--
Multiplies two 32-bit signed integers, wrapping around on over- or underflow. Usually accessed via
the `*` operator.
@@ -954,7 +954,7 @@ the `*` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_mul"]
protected def Int32.mul (a b : Int32) : Int32 := a.toBitVec * b.toBitVec
def Int32.mul (a b : Int32) : Int32 := a.toBitVec * b.toBitVec
/--
Truncating division for 32-bit signed integers, rounding towards zero. Usually accessed via the `/`
operator.
@@ -971,7 +971,7 @@ Examples:
* `Int32.div 10 0 = 0`
-/
@[extern "lean_int32_div"]
protected def Int32.div (a b : Int32) : Int32 := BitVec.sdiv a.toBitVec b.toBitVec
def Int32.div (a b : Int32) : Int32 := BitVec.sdiv a.toBitVec b.toBitVec
/--
The modulo operator for 32-bit signed integers, which computes the remainder when dividing one
integer by another with the T-rounding convention used by `Int32.div`. Usually accessed via the `%`
@@ -992,7 +992,7 @@ Examples:
* `Int32.mod (-4) 0 = (-4)`
-/
@[extern "lean_int32_mod"]
protected def Int32.mod (a b : Int32) : Int32 := BitVec.srem a.toBitVec b.toBitVec
def Int32.mod (a b : Int32) : Int32 := BitVec.srem a.toBitVec b.toBitVec
/--
Bitwise and for 32-bit signed integers. Usually accessed via the `&&&` operator.
@@ -1002,7 +1002,7 @@ according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_land"]
protected def Int32.land (a b : Int32) : Int32 := a.toBitVec &&& b.toBitVec
def Int32.land (a b : Int32) : Int32 := a.toBitVec &&& b.toBitVec
/--
Bitwise or for 32-bit signed integers. Usually accessed via the `|||` operator.
@@ -1012,7 +1012,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_lor"]
protected def Int32.lor (a b : Int32) : Int32 := a.toBitVec ||| b.toBitVec
def Int32.lor (a b : Int32) : Int32 := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for 32-bit signed integers. Usually accessed via the `^^^` operator.
@@ -1022,7 +1022,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_xor"]
protected def Int32.xor (a b : Int32) : Int32 := a.toBitVec ^^^ b.toBitVec
def Int32.xor (a b : Int32) : Int32 := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for 32-bit signed integers. Usually accessed via the `<<<` operator.
@@ -1031,7 +1031,7 @@ Signed integers are interpreted as bitvectors according to the two's complement
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_shift_left"]
protected def Int32.shiftLeft (a b : Int32) : Int32 := a.toBitVec <<< (b.toBitVec.smod 32)
def Int32.shiftLeft (a b : Int32) : Int32 := a.toBitVec <<< (b.toBitVec.smod 32)
/--
Arithmetic right shift for 32-bit signed integers. Usually accessed via the `<<<` operator.
@@ -1040,7 +1040,7 @@ The high bits are filled with the value of the most significant bit.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_shift_right"]
protected def Int32.shiftRight (a b : Int32) : Int32 := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod 32)
def Int32.shiftRight (a b : Int32) : Int32 := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod 32)
/--
Bitwise complement, also known as bitwise negation, for 32-bit signed integers. Usually accessed via
the `~~~` prefix operator.
@@ -1051,7 +1051,7 @@ Integers use the two's complement representation, so `Int32.complement a = -(a +
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_complement"]
protected def Int32.complement (a : Int32) : Int32 := ~~~a.toBitVec
def Int32.complement (a : Int32) : Int32 := ~~~a.toBitVec
/--
Computes the absolute value of a 32-bit signed integer.
@@ -1061,7 +1061,7 @@ mapped to `Int32.minValue`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int32_abs"]
protected def Int32.abs (a : Int32) : Int32 := a.toBitVec.abs
def Int32.abs (a : Int32) : Int32 := a.toBitVec.abs
/--
Decides whether two 32-bit signed integers are equal. Usually accessed via the `DecidableEq Int32`
@@ -1087,12 +1087,12 @@ def Int32.decEq (a b : Int32) : Decidable (a = b) :=
Strict inequality of 32-bit signed integers, defined as inequality of the corresponding integers.
Usually accessed via the `<` operator.
-/
protected def Int32.lt (a b : Int32) : Prop := a.toBitVec.slt b.toBitVec
def Int32.lt (a b : Int32) : Prop := a.toBitVec.slt b.toBitVec
/--
Non-strict inequality of 32-bit signed integers, defined as inequality of the corresponding integers.
Usually accessed via the `≤` operator.
-/
protected def Int32.le (a b : Int32) : Prop := a.toBitVec.sle b.toBitVec
def Int32.le (a b : Int32) : Prop := a.toBitVec.sle b.toBitVec
instance : Inhabited Int32 where
default := 0
@@ -1333,7 +1333,7 @@ Adds two 64-bit signed integers, wrapping around on over- or underflow. Usually
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_add"]
protected def Int64.add (a b : Int64) : Int64 := a.toBitVec + b.toBitVec
def Int64.add (a b : Int64) : Int64 := a.toBitVec + b.toBitVec
/--
Subtracts one 64-bit signed integer from another, wrapping around on over- or underflow. Usually
accessed via the `-` operator.
@@ -1341,7 +1341,7 @@ accessed via the `-` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_sub"]
protected def Int64.sub (a b : Int64) : Int64 := a.toBitVec - b.toBitVec
def Int64.sub (a b : Int64) : Int64 := a.toBitVec - b.toBitVec
/--
Multiplies two 64-bit signed integers, wrapping around on over- or underflow. Usually accessed via
the `*` operator.
@@ -1349,7 +1349,7 @@ the `*` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_mul"]
protected def Int64.mul (a b : Int64) : Int64 := a.toBitVec * b.toBitVec
def Int64.mul (a b : Int64) : Int64 := a.toBitVec * b.toBitVec
/--
Truncating division for 64-bit signed integers, rounding towards zero. Usually accessed via the `/`
operator.
@@ -1366,7 +1366,7 @@ Examples:
* `Int64.div 10 0 = 0`
-/
@[extern "lean_int64_div"]
protected def Int64.div (a b : Int64) : Int64 := BitVec.sdiv a.toBitVec b.toBitVec
def Int64.div (a b : Int64) : Int64 := BitVec.sdiv a.toBitVec b.toBitVec
/--
The modulo operator for 64-bit signed integers, which computes the remainder when dividing one
integer by another with the T-rounding convention used by `Int64.div`. Usually accessed via the `%`
@@ -1387,7 +1387,7 @@ Examples:
* `Int64.mod (-4) 0 = (-4)`
-/
@[extern "lean_int64_mod"]
protected def Int64.mod (a b : Int64) : Int64 := BitVec.srem a.toBitVec b.toBitVec
def Int64.mod (a b : Int64) : Int64 := BitVec.srem a.toBitVec b.toBitVec
/--
Bitwise and for 64-bit signed integers. Usually accessed via the `&&&` operator.
@@ -1397,7 +1397,7 @@ according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_land"]
protected def Int64.land (a b : Int64) : Int64 := a.toBitVec &&& b.toBitVec
def Int64.land (a b : Int64) : Int64 := a.toBitVec &&& b.toBitVec
/--
Bitwise or for 64-bit signed integers. Usually accessed via the `|||` operator.
@@ -1407,7 +1407,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_lor"]
protected def Int64.lor (a b : Int64) : Int64 := a.toBitVec ||| b.toBitVec
def Int64.lor (a b : Int64) : Int64 := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for 64-bit signed integers. Usually accessed via the `^^^` operator.
@@ -1417,7 +1417,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_xor"]
protected def Int64.xor (a b : Int64) : Int64 := a.toBitVec ^^^ b.toBitVec
def Int64.xor (a b : Int64) : Int64 := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for 64-bit signed integers. Usually accessed via the `<<<` operator.
@@ -1426,7 +1426,7 @@ Signed integers are interpreted as bitvectors according to the two's complement
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_shift_left"]
protected def Int64.shiftLeft (a b : Int64) : Int64 := a.toBitVec <<< (b.toBitVec.smod 64)
def Int64.shiftLeft (a b : Int64) : Int64 := a.toBitVec <<< (b.toBitVec.smod 64)
/--
Arithmetic right shift for 64-bit signed integers. Usually accessed via the `<<<` operator.
@@ -1435,7 +1435,7 @@ The high bits are filled with the value of the most significant bit.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_shift_right"]
protected def Int64.shiftRight (a b : Int64) : Int64 := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod 64)
def Int64.shiftRight (a b : Int64) : Int64 := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod 64)
/--
Bitwise complement, also known as bitwise negation, for 64-bit signed integers. Usually accessed via
the `~~~` prefix operator.
@@ -1446,7 +1446,7 @@ Integers use the two's complement representation, so `Int64.complement a = -(a +
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_complement"]
protected def Int64.complement (a : Int64) : Int64 := ~~~a.toBitVec
def Int64.complement (a : Int64) : Int64 := ~~~a.toBitVec
/--
Computes the absolute value of a 64-bit signed integer.
@@ -1456,7 +1456,7 @@ mapped to `Int64.minValue`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_int64_abs"]
protected def Int64.abs (a : Int64) : Int64 := a.toBitVec.abs
def Int64.abs (a : Int64) : Int64 := a.toBitVec.abs
/--
Decides whether two 64-bit signed integers are equal. Usually accessed via the `DecidableEq Int64`
@@ -1482,12 +1482,12 @@ def Int64.decEq (a b : Int64) : Decidable (a = b) :=
Strict inequality of 64-bit signed integers, defined as inequality of the corresponding integers.
Usually accessed via the `<` operator.
-/
protected def Int64.lt (a b : Int64) : Prop := a.toBitVec.slt b.toBitVec
def Int64.lt (a b : Int64) : Prop := a.toBitVec.slt b.toBitVec
/--
Non-strict inequality of 64-bit signed integers, defined as inequality of the corresponding integers.
Usually accessed via the `≤` operator.
-/
protected def Int64.le (a b : Int64) : Prop := a.toBitVec.sle b.toBitVec
def Int64.le (a b : Int64) : Prop := a.toBitVec.sle b.toBitVec
instance : Inhabited Int64 where
default := 0
@@ -1670,7 +1670,7 @@ Negates word-sized signed integers. Usually accessed via the `-` prefix operator
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_neg"]
protected def ISize.neg (i : ISize) : ISize := -i.toBitVec
def ISize.neg (i : ISize) : ISize := -i.toBitVec
instance : ToString ISize where
toString i := toString i.toInt
@@ -1711,7 +1711,7 @@ the `+` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_add"]
protected def ISize.add (a b : ISize) : ISize := a.toBitVec + b.toBitVec
def ISize.add (a b : ISize) : ISize := a.toBitVec + b.toBitVec
/--
Subtracts one word-sized signed integer from another, wrapping around on over- or underflow. Usually
accessed via the `-` operator.
@@ -1719,7 +1719,7 @@ accessed via the `-` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_sub"]
protected def ISize.sub (a b : ISize) : ISize := a.toBitVec - b.toBitVec
def ISize.sub (a b : ISize) : ISize := a.toBitVec - b.toBitVec
/--
Multiplies two word-sized signed integers, wrapping around on over- or underflow. Usually accessed
via the `*` operator.
@@ -1727,7 +1727,7 @@ via the `*` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_mul"]
protected def ISize.mul (a b : ISize) : ISize := a.toBitVec * b.toBitVec
def ISize.mul (a b : ISize) : ISize := a.toBitVec * b.toBitVec
/--
Truncating division for word-sized signed integers, rounding towards zero. Usually accessed via the
`/` operator.
@@ -1744,7 +1744,7 @@ Examples:
* `ISize.div 10 0 = 0`
-/
@[extern "lean_isize_div"]
protected def ISize.div (a b : ISize) : ISize := BitVec.sdiv a.toBitVec b.toBitVec
def ISize.div (a b : ISize) : ISize := BitVec.sdiv a.toBitVec b.toBitVec
/--
The modulo operator for word-sized signed integers, which computes the remainder when dividing one
integer by another with the T-rounding convention used by `ISize.div`. Usually accessed via the `%`
@@ -1765,7 +1765,7 @@ Examples:
* `ISize.mod (-4) 0 = (-4)`
-/
@[extern "lean_isize_mod"]
protected def ISize.mod (a b : ISize) : ISize := BitVec.srem a.toBitVec b.toBitVec
def ISize.mod (a b : ISize) : ISize := BitVec.srem a.toBitVec b.toBitVec
/--
Bitwise and for word-sized signed integers. Usually accessed via the `&&&` operator.
@@ -1775,7 +1775,7 @@ according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_land"]
protected def ISize.land (a b : ISize) : ISize := a.toBitVec &&& b.toBitVec
def ISize.land (a b : ISize) : ISize := a.toBitVec &&& b.toBitVec
/--
Bitwise or for word-sized signed integers. Usually accessed via the `|||` operator.
@@ -1785,7 +1785,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_lor"]
protected def ISize.lor (a b : ISize) : ISize := a.toBitVec ||| b.toBitVec
def ISize.lor (a b : ISize) : ISize := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for word-sized signed integers. Usually accessed via the `^^^` operator.
@@ -1795,7 +1795,7 @@ integers is set, according to the two's complement representation.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_xor"]
protected def ISize.xor (a b : ISize) : ISize := a.toBitVec ^^^ b.toBitVec
def ISize.xor (a b : ISize) : ISize := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for word-sized signed integers. Usually accessed via the `<<<` operator.
@@ -1804,7 +1804,7 @@ Signed integers are interpreted as bitvectors according to the two's complement
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_shift_left"]
protected def ISize.shiftLeft (a b : ISize) : ISize := a.toBitVec <<< (b.toBitVec.smod System.Platform.numBits)
def ISize.shiftLeft (a b : ISize) : ISize := a.toBitVec <<< (b.toBitVec.smod System.Platform.numBits)
/--
Arithmetic right shift for word-sized signed integers. Usually accessed via the `<<<` operator.
@@ -1814,7 +1814,7 @@ the most significant bit.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_shift_right"]
protected def ISize.shiftRight (a b : ISize) : ISize := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod System.Platform.numBits)
def ISize.shiftRight (a b : ISize) : ISize := BitVec.sshiftRight' a.toBitVec (b.toBitVec.smod System.Platform.numBits)
/--
Bitwise complement, also known as bitwise negation, for word-sized signed integers. Usually accessed
via the `~~~` prefix operator.
@@ -1825,7 +1825,7 @@ Integers use the two's complement representation, so `ISize.complement a = -(a +
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_complement"]
protected def ISize.complement (a : ISize) : ISize := ~~~a.toBitVec
def ISize.complement (a : ISize) : ISize := ~~~a.toBitVec
/--
Computes the absolute value of a word-sized signed integer.
@@ -1836,7 +1836,7 @@ mapped to `ISize.minValue`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_isize_abs"]
protected def ISize.abs (a : ISize) : ISize := a.toBitVec.abs
def ISize.abs (a : ISize) : ISize := a.toBitVec.abs
/--
Decides whether two word-sized signed integers are equal. Usually accessed via the
@@ -1862,12 +1862,12 @@ def ISize.decEq (a b : ISize) : Decidable (a = b) :=
Strict inequality of word-sized signed integers, defined as inequality of the corresponding
integers. Usually accessed via the `<` operator.
-/
protected def ISize.lt (a b : ISize) : Prop := a.toBitVec.slt b.toBitVec
def ISize.lt (a b : ISize) : Prop := a.toBitVec.slt b.toBitVec
/--
Non-strict inequality of word-sized signed integers, defined as inequality of the corresponding
integers. Usually accessed via the `≤` operator.
-/
protected def ISize.le (a b : ISize) : Prop := a.toBitVec.sle b.toBitVec
def ISize.le (a b : ISize) : Prop := a.toBitVec.sle b.toBitVec
instance : Inhabited ISize where
default := 0

View File

@@ -12,6 +12,11 @@ macro "declare_bitwise_int_theorems" typeName:ident bits:term:arg : command =>
`(
namespace $typeName
@[simp, int_toBitVec] protected theorem toBitVec_add {a b : $typeName} : (a + b).toBitVec = a.toBitVec + b.toBitVec := rfl
@[simp, int_toBitVec] protected theorem toBitVec_sub {a b : $typeName} : (a - b).toBitVec = a.toBitVec - b.toBitVec := rfl
@[simp, int_toBitVec] protected theorem toBitVec_mul {a b : $typeName} : (a * b).toBitVec = a.toBitVec * b.toBitVec := rfl
@[simp, int_toBitVec] protected theorem toBitVec_div {a b : $typeName} : (a / b).toBitVec = a.toBitVec.sdiv b.toBitVec := rfl
@[simp, int_toBitVec] protected theorem toBitVec_mod {a b : $typeName} : (a % b).toBitVec = a.toBitVec.srem b.toBitVec := rfl
@[simp, int_toBitVec] protected theorem toBitVec_not {a : $typeName} : (~~~a).toBitVec = ~~~a.toBitVec := rfl
@[simp, int_toBitVec] protected theorem toBitVec_and (a b : $typeName) : (a &&& b).toBitVec = a.toBitVec &&& b.toBitVec := rfl
@[simp, int_toBitVec] protected theorem toBitVec_or (a b : $typeName) : (a ||| b).toBitVec = a.toBitVec ||| b.toBitVec := rfl
@@ -52,6 +57,30 @@ theorem Bool.toBitVec_toISize {b : Bool} :
· apply BitVec.eq_of_toNat_eq
simp [toISize]
/-!
The unsigned → signed reinterpretations (`UInt8.toInt8`, …, `USize.toISize`) commute with
`+`, negation, `-`, `*`, and bitwise `&&&`: both sides denote the same two's-complement
bit pattern, so every lemma here holds by `rfl`.
-/
@[simp] theorem UInt8.toInt8_add (a b : UInt8) : (a + b).toInt8 = a.toInt8 + b.toInt8 := rfl
@[simp] theorem UInt16.toInt16_add (a b : UInt16) : (a + b).toInt16 = a.toInt16 + b.toInt16 := rfl
@[simp] theorem UInt32.toInt32_add (a b : UInt32) : (a + b).toInt32 = a.toInt32 + b.toInt32 := rfl
@[simp] theorem UInt64.toInt64_add (a b : UInt64) : (a + b).toInt64 = a.toInt64 + b.toInt64 := rfl
@[simp] theorem USize.toISize_add (a b : USize) : (a + b).toISize = a.toISize + b.toISize := rfl
@[simp] theorem UInt8.toInt8_neg (a : UInt8) : (-a).toInt8 = -a.toInt8 := rfl
@[simp] theorem UInt16.toInt16_neg (a : UInt16) : (-a).toInt16 = -a.toInt16 := rfl
@[simp] theorem UInt32.toInt32_neg (a : UInt32) : (-a).toInt32 = -a.toInt32 := rfl
@[simp] theorem UInt64.toInt64_neg (a : UInt64) : (-a).toInt64 = -a.toInt64 := rfl
@[simp] theorem USize.toISize_neg (a : USize) : (-a).toISize = -a.toISize := rfl
@[simp] theorem UInt8.toInt8_sub (a b : UInt8) : (a - b).toInt8 = a.toInt8 - b.toInt8 := rfl
@[simp] theorem UInt16.toInt16_sub (a b : UInt16) : (a - b).toInt16 = a.toInt16 - b.toInt16 := rfl
@[simp] theorem UInt32.toInt32_sub (a b : UInt32) : (a - b).toInt32 = a.toInt32 - b.toInt32 := rfl
@[simp] theorem UInt64.toInt64_sub (a b : UInt64) : (a - b).toInt64 = a.toInt64 - b.toInt64 := rfl
@[simp] theorem USize.toISize_sub (a b : USize) : (a - b).toISize = a.toISize - b.toISize := rfl
@[simp] theorem UInt8.toInt8_mul (a b : UInt8) : (a * b).toInt8 = a.toInt8 * b.toInt8 := rfl
@[simp] theorem UInt16.toInt16_mul (a b : UInt16) : (a * b).toInt16 = a.toInt16 * b.toInt16 := rfl
@[simp] theorem UInt32.toInt32_mul (a b : UInt32) : (a * b).toInt32 = a.toInt32 * b.toInt32 := rfl
@[simp] theorem UInt64.toInt64_mul (a b : UInt64) : (a * b).toInt64 = a.toInt64 * b.toInt64 := rfl
@[simp] theorem USize.toISize_mul (a b : USize) : (a * b).toISize = a.toISize * b.toISize := rfl
@[simp] theorem UInt8.toInt8_and (a b : UInt8) : (a &&& b).toInt8 = a.toInt8 &&& b.toInt8 := rfl
@[simp] theorem UInt16.toInt16_and (a b : UInt16) : (a &&& b).toInt16 = a.toInt16 &&& b.toInt16 := rfl
@[simp] theorem UInt32.toInt32_and (a b : UInt32) : (a &&& b).toInt32 = a.toInt32 &&& b.toInt32 := rfl
@@ -75,686 +104,3 @@ theorem Bool.toBitVec_toISize {b : Bool} :
@[simp] theorem UInt32.toInt32_not (a : UInt32) : (~~~a).toInt32 = ~~~a.toInt32 := rfl
@[simp] theorem UInt64.toInt64_not (a : UInt64) : (~~~a).toInt64 = ~~~a.toInt64 := rfl
@[simp] theorem USize.toISize_not (a : USize) : (~~~a).toISize = ~~~a.toISize := rfl
-- Signed → unsigned reinterpretation commutes with the bitwise operations; the bit
-- pattern is unchanged, so each of these is `rfl`.
@[simp] theorem Int8.toUInt8_and (a b : Int8) : (a &&& b).toUInt8 = a.toUInt8 &&& b.toUInt8 := rfl
@[simp] theorem Int16.toUInt16_and (a b : Int16) : (a &&& b).toUInt16 = a.toUInt16 &&& b.toUInt16 := rfl
@[simp] theorem Int32.toUInt32_and (a b : Int32) : (a &&& b).toUInt32 = a.toUInt32 &&& b.toUInt32 := rfl
@[simp] theorem Int64.toUInt64_and (a b : Int64) : (a &&& b).toUInt64 = a.toUInt64 &&& b.toUInt64 := rfl
@[simp] theorem ISize.toUSize_and (a b : ISize) : (a &&& b).toUSize = a.toUSize &&& b.toUSize := rfl
@[simp] theorem Int8.toUInt8_or (a b : Int8) : (a ||| b).toUInt8 = a.toUInt8 ||| b.toUInt8 := rfl
@[simp] theorem Int16.toUInt16_or (a b : Int16) : (a ||| b).toUInt16 = a.toUInt16 ||| b.toUInt16 := rfl
@[simp] theorem Int32.toUInt32_or (a b : Int32) : (a ||| b).toUInt32 = a.toUInt32 ||| b.toUInt32 := rfl
@[simp] theorem Int64.toUInt64_or (a b : Int64) : (a ||| b).toUInt64 = a.toUInt64 ||| b.toUInt64 := rfl
@[simp] theorem ISize.toUSize_or (a b : ISize) : (a ||| b).toUSize = a.toUSize ||| b.toUSize := rfl
@[simp] theorem Int8.toUInt8_xor (a b : Int8) : (a ^^^ b).toUInt8 = a.toUInt8 ^^^ b.toUInt8 := rfl
@[simp] theorem Int16.toUInt16_xor (a b : Int16) : (a ^^^ b).toUInt16 = a.toUInt16 ^^^ b.toUInt16 := rfl
@[simp] theorem Int32.toUInt32_xor (a b : Int32) : (a ^^^ b).toUInt32 = a.toUInt32 ^^^ b.toUInt32 := rfl
@[simp] theorem Int64.toUInt64_xor (a b : Int64) : (a ^^^ b).toUInt64 = a.toUInt64 ^^^ b.toUInt64 := rfl
@[simp] theorem ISize.toUSize_xor (a b : ISize) : (a ^^^ b).toUSize = a.toUSize ^^^ b.toUSize := rfl
@[simp] theorem Int8.toUInt8_not (a : Int8) : (~~~a).toUInt8 = ~~~a.toUInt8 := rfl
@[simp] theorem Int16.toUInt16_not (a : Int16) : (~~~a).toUInt16 = ~~~a.toUInt16 := rfl
@[simp] theorem Int32.toUInt32_not (a : Int32) : (~~~a).toUInt32 = ~~~a.toUInt32 := rfl
@[simp] theorem Int64.toUInt64_not (a : Int64) : (~~~a).toUInt64 = ~~~a.toUInt64 := rfl
@[simp] theorem ISize.toUSize_not (a : ISize) : (~~~a).toUSize = ~~~a.toUSize := rfl
-- Cross-width signed conversions commute with `&&&`; proved by injectivity of
-- `toBitVec` and `simp` on the bit-vector level.
@[simp] theorem Int8.toInt16_and (a b : Int8) : (a &&& b).toInt16 = a.toInt16 &&& b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int8.toInt32_and (a b : Int8) : (a &&& b).toInt32 = a.toInt32 &&& b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem Int8.toInt64_and (a b : Int8) : (a &&& b).toInt64 = a.toInt64 &&& b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int8.toISize_and (a b : Int8) : (a &&& b).toISize = a.toISize &&& b.toISize := ISize.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toInt8_and (a b : Int16) : (a &&& b).toInt8 = a.toInt8 &&& b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toInt32_and (a b : Int16) : (a &&& b).toInt32 = a.toInt32 &&& b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toInt64_and (a b : Int16) : (a &&& b).toInt64 = a.toInt64 &&& b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toISize_and (a b : Int16) : (a &&& b).toISize = a.toISize &&& b.toISize := ISize.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt8_and (a b : Int32) : (a &&& b).toInt8 = a.toInt8 &&& b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt16_and (a b : Int32) : (a &&& b).toInt16 = a.toInt16 &&& b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt64_and (a b : Int32) : (a &&& b).toInt64 = a.toInt64 &&& b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toISize_and (a b : Int32) : (a &&& b).toISize = a.toISize &&& b.toISize := ISize.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt8_and (a b : ISize) : (a &&& b).toInt8 = a.toInt8 &&& b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt16_and (a b : ISize) : (a &&& b).toInt16 = a.toInt16 &&& b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt32_and (a b : ISize) : (a &&& b).toInt32 = a.toInt32 &&& b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt64_and (a b : ISize) : (a &&& b).toInt64 = a.toInt64 &&& b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt8_and (a b : Int64) : (a &&& b).toInt8 = a.toInt8 &&& b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt16_and (a b : Int64) : (a &&& b).toInt16 = a.toInt16 &&& b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt32_and (a b : Int64) : (a &&& b).toInt32 = a.toInt32 &&& b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toISize_and (a b : Int64) : (a &&& b).toISize = a.toISize &&& b.toISize := ISize.toBitVec_inj.1 (by simp)
-- Same pattern for `|||`.
@[simp] theorem Int8.toInt16_or (a b : Int8) : (a ||| b).toInt16 = a.toInt16 ||| b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int8.toInt32_or (a b : Int8) : (a ||| b).toInt32 = a.toInt32 ||| b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem Int8.toInt64_or (a b : Int8) : (a ||| b).toInt64 = a.toInt64 ||| b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int8.toISize_or (a b : Int8) : (a ||| b).toISize = a.toISize ||| b.toISize := ISize.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toInt8_or (a b : Int16) : (a ||| b).toInt8 = a.toInt8 ||| b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toInt32_or (a b : Int16) : (a ||| b).toInt32 = a.toInt32 ||| b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toInt64_or (a b : Int16) : (a ||| b).toInt64 = a.toInt64 ||| b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toISize_or (a b : Int16) : (a ||| b).toISize = a.toISize ||| b.toISize := ISize.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt8_or (a b : Int32) : (a ||| b).toInt8 = a.toInt8 ||| b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt16_or (a b : Int32) : (a ||| b).toInt16 = a.toInt16 ||| b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt64_or (a b : Int32) : (a ||| b).toInt64 = a.toInt64 ||| b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toISize_or (a b : Int32) : (a ||| b).toISize = a.toISize ||| b.toISize := ISize.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt8_or (a b : ISize) : (a ||| b).toInt8 = a.toInt8 ||| b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt16_or (a b : ISize) : (a ||| b).toInt16 = a.toInt16 ||| b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt32_or (a b : ISize) : (a ||| b).toInt32 = a.toInt32 ||| b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt64_or (a b : ISize) : (a ||| b).toInt64 = a.toInt64 ||| b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt8_or (a b : Int64) : (a ||| b).toInt8 = a.toInt8 ||| b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt16_or (a b : Int64) : (a ||| b).toInt16 = a.toInt16 ||| b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt32_or (a b : Int64) : (a ||| b).toInt32 = a.toInt32 ||| b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toISize_or (a b : Int64) : (a ||| b).toISize = a.toISize ||| b.toISize := ISize.toBitVec_inj.1 (by simp)
-- Same pattern for `^^^`.
@[simp] theorem Int8.toInt16_xor (a b : Int8) : (a ^^^ b).toInt16 = a.toInt16 ^^^ b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int8.toInt32_xor (a b : Int8) : (a ^^^ b).toInt32 = a.toInt32 ^^^ b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem Int8.toInt64_xor (a b : Int8) : (a ^^^ b).toInt64 = a.toInt64 ^^^ b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int8.toISize_xor (a b : Int8) : (a ^^^ b).toISize = a.toISize ^^^ b.toISize := ISize.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toInt8_xor (a b : Int16) : (a ^^^ b).toInt8 = a.toInt8 ^^^ b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toInt32_xor (a b : Int16) : (a ^^^ b).toInt32 = a.toInt32 ^^^ b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toInt64_xor (a b : Int16) : (a ^^^ b).toInt64 = a.toInt64 ^^^ b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int16.toISize_xor (a b : Int16) : (a ^^^ b).toISize = a.toISize ^^^ b.toISize := ISize.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt8_xor (a b : Int32) : (a ^^^ b).toInt8 = a.toInt8 ^^^ b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt16_xor (a b : Int32) : (a ^^^ b).toInt16 = a.toInt16 ^^^ b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt64_xor (a b : Int32) : (a ^^^ b).toInt64 = a.toInt64 ^^^ b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toISize_xor (a b : Int32) : (a ^^^ b).toISize = a.toISize ^^^ b.toISize := ISize.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt8_xor (a b : ISize) : (a ^^^ b).toInt8 = a.toInt8 ^^^ b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt16_xor (a b : ISize) : (a ^^^ b).toInt16 = a.toInt16 ^^^ b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt32_xor (a b : ISize) : (a ^^^ b).toInt32 = a.toInt32 ^^^ b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt64_xor (a b : ISize) : (a ^^^ b).toInt64 = a.toInt64 ^^^ b.toInt64 := Int64.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt8_xor (a b : Int64) : (a ^^^ b).toInt8 = a.toInt8 ^^^ b.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt16_xor (a b : Int64) : (a ^^^ b).toInt16 = a.toInt16 ^^^ b.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt32_xor (a b : Int64) : (a ^^^ b).toInt32 = a.toInt32 ^^^ b.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toISize_xor (a b : Int64) : (a ^^^ b).toISize = a.toISize ^^^ b.toISize := ISize.toBitVec_inj.1 (by simp)
-- Two's complement identity: bitwise complement is `-a - 1`.
theorem Int8.not_eq_neg_add (a : Int8) : ~~~a = -a - 1 := Int8.toBitVec_inj.1 (by simpa using BitVec.not_eq_neg_add _)
theorem Int16.not_eq_neg_add (a : Int16) : ~~~a = -a - 1 := Int16.toBitVec_inj.1 (by simpa using BitVec.not_eq_neg_add _)
theorem Int32.not_eq_neg_add (a : Int32) : ~~~a = -a - 1 := Int32.toBitVec_inj.1 (by simpa using BitVec.not_eq_neg_add _)
theorem Int64.not_eq_neg_add (a : Int64) : ~~~a = -a - 1 := Int64.toBitVec_inj.1 (by simpa using BitVec.not_eq_neg_add _)
theorem ISize.not_eq_neg_add (a : ISize) : ~~~a = -a - 1 := ISize.toBitVec_inj.1 (by simpa using BitVec.not_eq_neg_add _)
-- The `Int` value of the complement, expressed with `Int.bmod` at the type's bit width.
@[simp] theorem Int8.toInt_not (a : Int8) : (~~~a).toInt = (-a.toInt - 1).bmod (2 ^ 8) := by simp [Int8.not_eq_neg_add]
@[simp] theorem Int16.toInt_not (a : Int16) : (~~~a).toInt = (-a.toInt - 1).bmod (2 ^ 16) := by simp [Int16.not_eq_neg_add]
@[simp] theorem Int32.toInt_not (a : Int32) : (~~~a).toInt = (-a.toInt - 1).bmod (2 ^ 32) := by simp [Int32.not_eq_neg_add]
@[simp] theorem Int64.toInt_not (a : Int64) : (~~~a).toInt = (-a.toInt - 1).bmod (2 ^ 64) := by simp [Int64.not_eq_neg_add]
@[simp] theorem ISize.toInt_not (a : ISize) : (~~~a).toInt = (-a.toInt - 1).bmod (2 ^ System.Platform.numBits) := by
simp [ISize.not_eq_neg_add, toInt_neg]
-- Truncating conversions commute with `~~~`; the `ISize` cases need
-- `System.Platform.numBits_pos` since the platform width is abstract.
@[simp] theorem Int16.toInt8_not (a : Int16) : (~~~a).toInt8 = ~~~a.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int32.toInt8_not (a : Int32) : (~~~a).toInt8 = ~~~a.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt8_not (a : Int64) : (~~~a).toInt8 = ~~~a.toInt8 := Int8.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt8_not (a : ISize) : (~~~a).toInt8 = ~~~a.toInt8 := Int8.toBitVec_inj.1 (by simp [System.Platform.numBits_pos])
@[simp] theorem Int32.toInt16_not (a : Int32) : (~~~a).toInt16 = ~~~a.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem Int64.toInt16_not (a : Int64) : (~~~a).toInt16 = ~~~a.toInt16 := Int16.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt16_not (a : ISize) : (~~~a).toInt16 = ~~~a.toInt16 := Int16.toBitVec_inj.1 (by simp [System.Platform.numBits_pos])
@[simp] theorem Int64.toInt32_not (a : Int64) : (~~~a).toInt32 = ~~~a.toInt32 := Int32.toBitVec_inj.1 (by simp)
@[simp] theorem ISize.toInt32_not (a : ISize) : (~~~a).toInt32 = ~~~a.toInt32 := Int32.toBitVec_inj.1 (by simp [System.Platform.numBits_pos])
@[simp] theorem Int64.toISize_not (a : Int64) : (~~~a).toISize = ~~~a.toISize := ISize.toBitVec_inj.1 (by simp)
-- `ofBitVec` is a bitwise homomorphism: it commutes with `&&&`, `|||`, `^^^`, `~~~`,
-- and maps `BitVec.intMin`/`intMax` to the corresponding `minValue`/`maxValue`.
@[simp] theorem Int8.ofBitVec_and (a b : BitVec 8) : Int8.ofBitVec (a &&& b) = Int8.ofBitVec a &&& Int8.ofBitVec b := rfl
@[simp] theorem Int16.ofBitVec_and (a b : BitVec 16) : Int16.ofBitVec (a &&& b) = Int16.ofBitVec a &&& Int16.ofBitVec b := rfl
@[simp] theorem Int32.ofBitVec_and (a b : BitVec 32) : Int32.ofBitVec (a &&& b) = Int32.ofBitVec a &&& Int32.ofBitVec b := rfl
@[simp] theorem Int64.ofBitVec_and (a b : BitVec 64) : Int64.ofBitVec (a &&& b) = Int64.ofBitVec a &&& Int64.ofBitVec b := rfl
@[simp] theorem ISize.ofBitVec_and (a b : BitVec System.Platform.numBits) : ISize.ofBitVec (a &&& b) = ISize.ofBitVec a &&& ISize.ofBitVec b := rfl
@[simp] theorem Int8.ofBitVec_or (a b : BitVec 8) : Int8.ofBitVec (a ||| b) = Int8.ofBitVec a ||| Int8.ofBitVec b := rfl
@[simp] theorem Int16.ofBitVec_or (a b : BitVec 16) : Int16.ofBitVec (a ||| b) = Int16.ofBitVec a ||| Int16.ofBitVec b := rfl
@[simp] theorem Int32.ofBitVec_or (a b : BitVec 32) : Int32.ofBitVec (a ||| b) = Int32.ofBitVec a ||| Int32.ofBitVec b := rfl
@[simp] theorem Int64.ofBitVec_or (a b : BitVec 64) : Int64.ofBitVec (a ||| b) = Int64.ofBitVec a ||| Int64.ofBitVec b := rfl
@[simp] theorem ISize.ofBitVec_or (a b : BitVec System.Platform.numBits) : ISize.ofBitVec (a ||| b) = ISize.ofBitVec a ||| ISize.ofBitVec b := rfl
@[simp] theorem Int8.ofBitVec_xor (a b : BitVec 8) : Int8.ofBitVec (a ^^^ b) = Int8.ofBitVec a ^^^ Int8.ofBitVec b := rfl
@[simp] theorem Int16.ofBitVec_xor (a b : BitVec 16) : Int16.ofBitVec (a ^^^ b) = Int16.ofBitVec a ^^^ Int16.ofBitVec b := rfl
@[simp] theorem Int32.ofBitVec_xor (a b : BitVec 32) : Int32.ofBitVec (a ^^^ b) = Int32.ofBitVec a ^^^ Int32.ofBitVec b := rfl
@[simp] theorem Int64.ofBitVec_xor (a b : BitVec 64) : Int64.ofBitVec (a ^^^ b) = Int64.ofBitVec a ^^^ Int64.ofBitVec b := rfl
@[simp] theorem ISize.ofBitVec_xor (a b : BitVec System.Platform.numBits) : ISize.ofBitVec (a ^^^ b) = ISize.ofBitVec a ^^^ ISize.ofBitVec b := rfl
@[simp] theorem Int8.ofBitVec_not (a : BitVec 8) : Int8.ofBitVec (~~~a) = ~~~Int8.ofBitVec a := rfl
@[simp] theorem Int16.ofBitVec_not (a : BitVec 16) : Int16.ofBitVec (~~~a) = ~~~Int16.ofBitVec a := rfl
@[simp] theorem Int32.ofBitVec_not (a : BitVec 32) : Int32.ofBitVec (~~~a) = ~~~Int32.ofBitVec a := rfl
@[simp] theorem Int64.ofBitVec_not (a : BitVec 64) : Int64.ofBitVec (~~~a) = ~~~Int64.ofBitVec a := rfl
@[simp] theorem ISize.ofBitVec_not (a : BitVec System.Platform.numBits) : ISize.ofBitVec (~~~a) = ~~~ISize.ofBitVec a := rfl
@[simp] theorem Int8.ofBitVec_intMin : Int8.ofBitVec (BitVec.intMin 8) = Int8.minValue := rfl
@[simp] theorem Int16.ofBitVec_intMin : Int16.ofBitVec (BitVec.intMin 16) = Int16.minValue := rfl
@[simp] theorem Int32.ofBitVec_intMin : Int32.ofBitVec (BitVec.intMin 32) = Int32.minValue := rfl
@[simp] theorem Int64.ofBitVec_intMin : Int64.ofBitVec (BitVec.intMin 64) = Int64.minValue := rfl
@[simp] theorem ISize.ofBitVec_intMin : ISize.ofBitVec (BitVec.intMin System.Platform.numBits) = ISize.minValue :=
ISize.toBitVec_inj.1 (by simp [BitVec.intMin_eq_neg_two_pow])
@[simp] theorem Int8.ofBitVec_intMax : Int8.ofBitVec (BitVec.intMax 8) = Int8.maxValue := rfl
@[simp] theorem Int16.ofBitVec_intMax : Int16.ofBitVec (BitVec.intMax 16) = Int16.maxValue := rfl
@[simp] theorem Int32.ofBitVec_intMax : Int32.ofBitVec (BitVec.intMax 32) = Int32.maxValue := rfl
@[simp] theorem Int64.ofBitVec_intMax : Int64.ofBitVec (BitVec.intMax 64) = Int64.maxValue := rfl
@[simp] theorem ISize.ofBitVec_intMax : ISize.ofBitVec (BitVec.intMax System.Platform.numBits) = ISize.maxValue :=
ISize.toInt_inj.1 (by rw [toInt_ofBitVec, BitVec.toInt_intMax, toInt_maxValue])
-- The converse two's complement identities: negation as complement-plus-one, and
-- complement as negation-minus-one (restated; cf. `not_eq_neg_add` above).
theorem Int8.neg_eq_not_add (a : Int8) : -a = ~~~a + 1 := Int8.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem Int16.neg_eq_not_add (a : Int16) : -a = ~~~a + 1 := Int16.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem Int32.neg_eq_not_add (a : Int32) : -a = ~~~a + 1 := Int32.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem Int64.neg_eq_not_add (a : Int64) : -a = ~~~a + 1 := Int64.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem ISize.neg_eq_not_add (a : ISize) : -a = ~~~a + 1 := ISize.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem Int8.not_eq_neg_sub (a : Int8) : ~~~a = -a - 1 := Int8.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
theorem Int16.not_eq_neg_sub (a : Int16) : ~~~a = -a - 1 := Int16.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
theorem Int32.not_eq_neg_sub (a : Int32) : ~~~a = -a - 1 := Int32.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
theorem Int64.not_eq_neg_sub (a : Int64) : ~~~a = -a - 1 := Int64.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
theorem ISize.not_eq_neg_sub (a : ISize) : ~~~a = -a - 1 := ISize.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
-- `(· ||| ·)` forms a commutative, associative, idempotent monoid with identity `0`
-- on each signed fixed-width type; registered with the corresponding `Std` classes.
protected theorem Int8.or_assoc (a b c : Int8) : a ||| b ||| c = a ||| (b ||| c) := Int8.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
protected theorem Int16.or_assoc (a b c : Int16) : a ||| b ||| c = a ||| (b ||| c) := Int16.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
protected theorem Int32.or_assoc (a b c : Int32) : a ||| b ||| c = a ||| (b ||| c) := Int32.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
protected theorem Int64.or_assoc (a b c : Int64) : a ||| b ||| c = a ||| (b ||| c) := Int64.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
protected theorem ISize.or_assoc (a b c : ISize) : a ||| b ||| c = a ||| (b ||| c) := ISize.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
instance : Std.Associative (α := Int8) (· ||| ·) := Int8.or_assoc
instance : Std.Associative (α := Int16) (· ||| ·) := Int16.or_assoc
instance : Std.Associative (α := Int32) (· ||| ·) := Int32.or_assoc
instance : Std.Associative (α := Int64) (· ||| ·) := Int64.or_assoc
instance : Std.Associative (α := ISize) (· ||| ·) := ISize.or_assoc
protected theorem Int8.or_comm (a b : Int8) : a ||| b = b ||| a := Int8.toBitVec_inj.1 (BitVec.or_comm _ _)
protected theorem Int16.or_comm (a b : Int16) : a ||| b = b ||| a := Int16.toBitVec_inj.1 (BitVec.or_comm _ _)
protected theorem Int32.or_comm (a b : Int32) : a ||| b = b ||| a := Int32.toBitVec_inj.1 (BitVec.or_comm _ _)
protected theorem Int64.or_comm (a b : Int64) : a ||| b = b ||| a := Int64.toBitVec_inj.1 (BitVec.or_comm _ _)
protected theorem ISize.or_comm (a b : ISize) : a ||| b = b ||| a := ISize.toBitVec_inj.1 (BitVec.or_comm _ _)
instance : Std.Commutative (α := Int8) (· ||| ·) := Int8.or_comm
instance : Std.Commutative (α := Int16) (· ||| ·) := Int16.or_comm
instance : Std.Commutative (α := Int32) (· ||| ·) := Int32.or_comm
instance : Std.Commutative (α := Int64) (· ||| ·) := Int64.or_comm
instance : Std.Commutative (α := ISize) (· ||| ·) := ISize.or_comm
@[simp] protected theorem Int8.or_self {a : Int8} : a ||| a = a := Int8.toBitVec_inj.1 BitVec.or_self
@[simp] protected theorem Int16.or_self {a : Int16} : a ||| a = a := Int16.toBitVec_inj.1 BitVec.or_self
@[simp] protected theorem Int32.or_self {a : Int32} : a ||| a = a := Int32.toBitVec_inj.1 BitVec.or_self
@[simp] protected theorem Int64.or_self {a : Int64} : a ||| a = a := Int64.toBitVec_inj.1 BitVec.or_self
@[simp] protected theorem ISize.or_self {a : ISize} : a ||| a = a := ISize.toBitVec_inj.1 BitVec.or_self
instance : Std.IdempotentOp (α := Int8) (· ||| ·) := fun _ => Int8.or_self
instance : Std.IdempotentOp (α := Int16) (· ||| ·) := fun _ => Int16.or_self
instance : Std.IdempotentOp (α := Int32) (· ||| ·) := fun _ => Int32.or_self
instance : Std.IdempotentOp (α := Int64) (· ||| ·) := fun _ => Int64.or_self
instance : Std.IdempotentOp (α := ISize) (· ||| ·) := fun _ => ISize.or_self
@[simp] protected theorem Int8.or_zero {a : Int8} : a ||| 0 = a := Int8.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem Int16.or_zero {a : Int16} : a ||| 0 = a := Int16.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem Int32.or_zero {a : Int32} : a ||| 0 = a := Int32.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem Int64.or_zero {a : Int64} : a ||| 0 = a := Int64.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem ISize.or_zero {a : ISize} : a ||| 0 = a := ISize.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem Int8.zero_or {a : Int8} : 0 ||| a = a := Int8.toBitVec_inj.1 BitVec.zero_or
@[simp] protected theorem Int16.zero_or {a : Int16} : 0 ||| a = a := Int16.toBitVec_inj.1 BitVec.zero_or
@[simp] protected theorem Int32.zero_or {a : Int32} : 0 ||| a = a := Int32.toBitVec_inj.1 BitVec.zero_or
@[simp] protected theorem Int64.zero_or {a : Int64} : 0 ||| a = a := Int64.toBitVec_inj.1 BitVec.zero_or
@[simp] protected theorem ISize.zero_or {a : ISize} : 0 ||| a = a := ISize.toBitVec_inj.1 BitVec.zero_or
instance : Std.LawfulCommIdentity (α := Int8) (· ||| ·) 0 where
right_id _ := Int8.or_zero
instance : Std.LawfulCommIdentity (α := Int16) (· ||| ·) 0 where
right_id _ := Int16.or_zero
instance : Std.LawfulCommIdentity (α := Int32) (· ||| ·) 0 where
right_id _ := Int32.or_zero
instance : Std.LawfulCommIdentity (α := Int64) (· ||| ·) 0 where
right_id _ := Int64.or_zero
instance : Std.LawfulCommIdentity (α := ISize) (· ||| ·) 0 where
right_id _ := ISize.or_zero
-- `-1` has all bits set, so it absorbs under bitwise or on either side.
-- NOTE(review): the extraction had dropped the `←` after `rw [` (visible as a stray
-- double space); without it `rw` cannot reduce the goal to a `BitVec` equality.
@[simp] theorem Int8.neg_one_or {a : Int8} : -1 ||| a = -1 := by
  rw [← Int8.toBitVec_inj, Int8.toBitVec_or, Int8.toBitVec_neg, Int8.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
@[simp] theorem Int16.neg_one_or {a : Int16} : -1 ||| a = -1 := by
  rw [← Int16.toBitVec_inj, Int16.toBitVec_or, Int16.toBitVec_neg, Int16.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
@[simp] theorem Int32.neg_one_or {a : Int32} : -1 ||| a = -1 := by
  rw [← Int32.toBitVec_inj, Int32.toBitVec_or, Int32.toBitVec_neg, Int32.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
@[simp] theorem Int64.neg_one_or {a : Int64} : -1 ||| a = -1 := by
  rw [← Int64.toBitVec_inj, Int64.toBitVec_or, Int64.toBitVec_neg, Int64.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
@[simp] theorem ISize.neg_one_or {a : ISize} : -1 ||| a = -1 := by
  rw [← ISize.toBitVec_inj, ISize.toBitVec_or, ISize.toBitVec_neg, ISize.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
@[simp] theorem Int8.or_neg_one {a : Int8} : a ||| -1 = -1 := by rw [Int8.or_comm, neg_one_or]
@[simp] theorem Int16.or_neg_one {a : Int16} : a ||| -1 = -1 := by rw [Int16.or_comm, neg_one_or]
@[simp] theorem Int32.or_neg_one {a : Int32} : a ||| -1 = -1 := by rw [Int32.or_comm, neg_one_or]
@[simp] theorem Int64.or_neg_one {a : Int64} : a ||| -1 = -1 := by rw [Int64.or_comm, neg_one_or]
@[simp] theorem ISize.or_neg_one {a : ISize} : a ||| -1 = -1 := by rw [ISize.or_comm, neg_one_or]
-- A bitwise or is zero exactly when both operands are zero.
-- NOTE(review): the extraction had dropped `↔`/`∧` from the statements and the `←`
-- after `simp [`; restored to match `BitVec.or_eq_zero_iff` via `toBitVec` injectivity.
@[simp] theorem Int8.or_eq_zero_iff {a b : Int8} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.or_eq_zero_iff {a b : Int16} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.or_eq_zero_iff {a b : Int32} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.or_eq_zero_iff {a b : Int64} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.or_eq_zero_iff {a b : ISize} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← ISize.toBitVec_inj]
-- `(· &&& ·)` is likewise commutative, associative, and idempotent, with `0`
-- absorbing; registered with the corresponding `Std` classes.
protected theorem Int8.and_assoc (a b c : Int8) : a &&& b &&& c = a &&& (b &&& c) := Int8.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
protected theorem Int16.and_assoc (a b c : Int16) : a &&& b &&& c = a &&& (b &&& c) := Int16.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
protected theorem Int32.and_assoc (a b c : Int32) : a &&& b &&& c = a &&& (b &&& c) := Int32.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
protected theorem Int64.and_assoc (a b c : Int64) : a &&& b &&& c = a &&& (b &&& c) := Int64.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
protected theorem ISize.and_assoc (a b c : ISize) : a &&& b &&& c = a &&& (b &&& c) := ISize.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
instance : Std.Associative (α := Int8) (· &&& ·) := Int8.and_assoc
instance : Std.Associative (α := Int16) (· &&& ·) := Int16.and_assoc
instance : Std.Associative (α := Int32) (· &&& ·) := Int32.and_assoc
instance : Std.Associative (α := Int64) (· &&& ·) := Int64.and_assoc
instance : Std.Associative (α := ISize) (· &&& ·) := ISize.and_assoc
protected theorem Int8.and_comm (a b : Int8) : a &&& b = b &&& a := Int8.toBitVec_inj.1 (BitVec.and_comm _ _)
protected theorem Int16.and_comm (a b : Int16) : a &&& b = b &&& a := Int16.toBitVec_inj.1 (BitVec.and_comm _ _)
protected theorem Int32.and_comm (a b : Int32) : a &&& b = b &&& a := Int32.toBitVec_inj.1 (BitVec.and_comm _ _)
protected theorem Int64.and_comm (a b : Int64) : a &&& b = b &&& a := Int64.toBitVec_inj.1 (BitVec.and_comm _ _)
protected theorem ISize.and_comm (a b : ISize) : a &&& b = b &&& a := ISize.toBitVec_inj.1 (BitVec.and_comm _ _)
instance : Std.Commutative (α := Int8) (· &&& ·) := Int8.and_comm
instance : Std.Commutative (α := Int16) (· &&& ·) := Int16.and_comm
instance : Std.Commutative (α := Int32) (· &&& ·) := Int32.and_comm
instance : Std.Commutative (α := Int64) (· &&& ·) := Int64.and_comm
instance : Std.Commutative (α := ISize) (· &&& ·) := ISize.and_comm
@[simp] protected theorem Int8.and_self {a : Int8} : a &&& a = a := Int8.toBitVec_inj.1 BitVec.and_self
@[simp] protected theorem Int16.and_self {a : Int16} : a &&& a = a := Int16.toBitVec_inj.1 BitVec.and_self
@[simp] protected theorem Int32.and_self {a : Int32} : a &&& a = a := Int32.toBitVec_inj.1 BitVec.and_self
@[simp] protected theorem Int64.and_self {a : Int64} : a &&& a = a := Int64.toBitVec_inj.1 BitVec.and_self
@[simp] protected theorem ISize.and_self {a : ISize} : a &&& a = a := ISize.toBitVec_inj.1 BitVec.and_self
instance : Std.IdempotentOp (α := Int8) (· &&& ·) := fun _ => Int8.and_self
instance : Std.IdempotentOp (α := Int16) (· &&& ·) := fun _ => Int16.and_self
instance : Std.IdempotentOp (α := Int32) (· &&& ·) := fun _ => Int32.and_self
instance : Std.IdempotentOp (α := Int64) (· &&& ·) := fun _ => Int64.and_self
instance : Std.IdempotentOp (α := ISize) (· &&& ·) := fun _ => ISize.and_self
@[simp] protected theorem Int8.and_zero {a : Int8} : a &&& 0 = 0 := Int8.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem Int16.and_zero {a : Int16} : a &&& 0 = 0 := Int16.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem Int32.and_zero {a : Int32} : a &&& 0 = 0 := Int32.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem Int64.and_zero {a : Int64} : a &&& 0 = 0 := Int64.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem ISize.and_zero {a : ISize} : a &&& 0 = 0 := ISize.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem Int8.zero_and {a : Int8} : 0 &&& a = 0 := Int8.toBitVec_inj.1 BitVec.zero_and
@[simp] protected theorem Int16.zero_and {a : Int16} : 0 &&& a = 0 := Int16.toBitVec_inj.1 BitVec.zero_and
@[simp] protected theorem Int32.zero_and {a : Int32} : 0 &&& a = 0 := Int32.toBitVec_inj.1 BitVec.zero_and
@[simp] protected theorem Int64.zero_and {a : Int64} : 0 &&& a = 0 := Int64.toBitVec_inj.1 BitVec.zero_and
@[simp] protected theorem ISize.zero_and {a : ISize} : 0 &&& a = 0 := ISize.toBitVec_inj.1 BitVec.zero_and
-- `-1` has all bits set, so it is the identity for bitwise and on either side.
-- NOTE(review): the extraction had dropped the `←` after `rw [` (visible as a stray
-- double space); restored so the goal rewrites to a `BitVec` equality.
@[simp] theorem Int8.neg_one_and {a : Int8} : -1 &&& a = a := by
  rw [← Int8.toBitVec_inj, Int8.toBitVec_and, Int8.toBitVec_neg, Int8.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
@[simp] theorem Int16.neg_one_and {a : Int16} : -1 &&& a = a := by
  rw [← Int16.toBitVec_inj, Int16.toBitVec_and, Int16.toBitVec_neg, Int16.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
@[simp] theorem Int32.neg_one_and {a : Int32} : -1 &&& a = a := by
  rw [← Int32.toBitVec_inj, Int32.toBitVec_and, Int32.toBitVec_neg, Int32.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
@[simp] theorem Int64.neg_one_and {a : Int64} : -1 &&& a = a := by
  rw [← Int64.toBitVec_inj, Int64.toBitVec_and, Int64.toBitVec_neg, Int64.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
@[simp] theorem ISize.neg_one_and {a : ISize} : -1 &&& a = a := by
  rw [← ISize.toBitVec_inj, ISize.toBitVec_and, ISize.toBitVec_neg, ISize.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
@[simp] theorem Int8.and_neg_one {a : Int8} : a &&& -1 = a := by rw [Int8.and_comm, neg_one_and]
@[simp] theorem Int16.and_neg_one {a : Int16} : a &&& -1 = a := by rw [Int16.and_comm, neg_one_and]
@[simp] theorem Int32.and_neg_one {a : Int32} : a &&& -1 = a := by rw [Int32.and_comm, neg_one_and]
@[simp] theorem Int64.and_neg_one {a : Int64} : a &&& -1 = a := by rw [Int64.and_comm, neg_one_and]
@[simp] theorem ISize.and_neg_one {a : ISize} : a &&& -1 = a := by rw [ISize.and_comm, neg_one_and]
instance : Std.LawfulCommIdentity (α := Int8) (· &&& ·) (-1) where
right_id _ := Int8.and_neg_one
instance : Std.LawfulCommIdentity (α := Int16) (· &&& ·) (-1) where
right_id _ := Int16.and_neg_one
instance : Std.LawfulCommIdentity (α := Int32) (· &&& ·) (-1) where
right_id _ := Int32.and_neg_one
instance : Std.LawfulCommIdentity (α := Int64) (· &&& ·) (-1) where
right_id _ := Int64.and_neg_one
instance : Std.LawfulCommIdentity (α := ISize) (· &&& ·) (-1) where
right_id _ := ISize.and_neg_one
-- A bitwise `&&&` equals `-1` (all bits set) iff both operands are `-1`.
-- Transported from `BitVec.and_eq_allOnes_iff` along the `toBitVec` injection.
@[simp] theorem Int8.and_eq_neg_one_iff {a b : Int8} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← Int8.toBitVec_inj, Int8.toBitVec_and, Int8.toBitVec_neg, Int8.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
@[simp] theorem Int16.and_eq_neg_one_iff {a b : Int16} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← Int16.toBitVec_inj, Int16.toBitVec_and, Int16.toBitVec_neg, Int16.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
@[simp] theorem Int32.and_eq_neg_one_iff {a b : Int32} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← Int32.toBitVec_inj, Int32.toBitVec_and, Int32.toBitVec_neg, Int32.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
@[simp] theorem Int64.and_eq_neg_one_iff {a b : Int64} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← Int64.toBitVec_inj, Int64.toBitVec_and, Int64.toBitVec_neg, Int64.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
@[simp] theorem ISize.and_eq_neg_one_iff {a b : ISize} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← ISize.toBitVec_inj, ISize.toBitVec_and, ISize.toBitVec_neg, ISize.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
-- XOR is associative and commutative on fixed-width signed integers; each proof
-- transports the corresponding `BitVec` lemma along the `toBitVec` injection.
protected theorem Int8.xor_assoc (a b c : Int8) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := Int8.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
protected theorem Int16.xor_assoc (a b c : Int16) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := Int16.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
protected theorem Int32.xor_assoc (a b c : Int32) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := Int32.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
protected theorem Int64.xor_assoc (a b c : Int64) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := Int64.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
protected theorem ISize.xor_assoc (a b c : ISize) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := ISize.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
-- `Std.Associative`/`Std.Commutative` are one-field structures, so the instances
-- are built with the anonymous constructor `⟨…⟩`.
instance : Std.Associative (α := Int8) (· ^^^ ·) := ⟨Int8.xor_assoc⟩
instance : Std.Associative (α := Int16) (· ^^^ ·) := ⟨Int16.xor_assoc⟩
instance : Std.Associative (α := Int32) (· ^^^ ·) := ⟨Int32.xor_assoc⟩
instance : Std.Associative (α := Int64) (· ^^^ ·) := ⟨Int64.xor_assoc⟩
instance : Std.Associative (α := ISize) (· ^^^ ·) := ⟨ISize.xor_assoc⟩
protected theorem Int8.xor_comm (a b : Int8) : a ^^^ b = b ^^^ a := Int8.toBitVec_inj.1 (BitVec.xor_comm _ _)
protected theorem Int16.xor_comm (a b : Int16) : a ^^^ b = b ^^^ a := Int16.toBitVec_inj.1 (BitVec.xor_comm _ _)
protected theorem Int32.xor_comm (a b : Int32) : a ^^^ b = b ^^^ a := Int32.toBitVec_inj.1 (BitVec.xor_comm _ _)
protected theorem Int64.xor_comm (a b : Int64) : a ^^^ b = b ^^^ a := Int64.toBitVec_inj.1 (BitVec.xor_comm _ _)
protected theorem ISize.xor_comm (a b : ISize) : a ^^^ b = b ^^^ a := ISize.toBitVec_inj.1 (BitVec.xor_comm _ _)
instance : Std.Commutative (α := Int8) (· ^^^ ·) := ⟨Int8.xor_comm⟩
instance : Std.Commutative (α := Int16) (· ^^^ ·) := ⟨Int16.xor_comm⟩
instance : Std.Commutative (α := Int32) (· ^^^ ·) := ⟨Int32.xor_comm⟩
instance : Std.Commutative (α := Int64) (· ^^^ ·) := ⟨Int64.xor_comm⟩
instance : Std.Commutative (α := ISize) (· ^^^ ·) := ⟨ISize.xor_comm⟩
-- `a ^^^ a = 0`, and `0` is a two-sided identity for XOR; all transported from
-- the corresponding `BitVec` lemmas along the `toBitVec` injection.
@[simp] protected theorem Int8.xor_self {a : Int8} : a ^^^ a = 0 := Int8.toBitVec_inj.1 BitVec.xor_self
@[simp] protected theorem Int16.xor_self {a : Int16} : a ^^^ a = 0 := Int16.toBitVec_inj.1 BitVec.xor_self
@[simp] protected theorem Int32.xor_self {a : Int32} : a ^^^ a = 0 := Int32.toBitVec_inj.1 BitVec.xor_self
@[simp] protected theorem Int64.xor_self {a : Int64} : a ^^^ a = 0 := Int64.toBitVec_inj.1 BitVec.xor_self
@[simp] protected theorem ISize.xor_self {a : ISize} : a ^^^ a = 0 := ISize.toBitVec_inj.1 BitVec.xor_self
@[simp] protected theorem Int8.xor_zero {a : Int8} : a ^^^ 0 = a := Int8.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem Int16.xor_zero {a : Int16} : a ^^^ 0 = a := Int16.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem Int32.xor_zero {a : Int32} : a ^^^ 0 = a := Int32.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem Int64.xor_zero {a : Int64} : a ^^^ 0 = a := Int64.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem ISize.xor_zero {a : ISize} : a ^^^ 0 = a := ISize.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem Int8.zero_xor {a : Int8} : 0 ^^^ a = a := Int8.toBitVec_inj.1 BitVec.zero_xor
@[simp] protected theorem Int16.zero_xor {a : Int16} : 0 ^^^ a = a := Int16.toBitVec_inj.1 BitVec.zero_xor
@[simp] protected theorem Int32.zero_xor {a : Int32} : 0 ^^^ a = a := Int32.toBitVec_inj.1 BitVec.zero_xor
@[simp] protected theorem Int64.zero_xor {a : Int64} : 0 ^^^ a = a := Int64.toBitVec_inj.1 BitVec.zero_xor
@[simp] protected theorem ISize.zero_xor {a : ISize} : 0 ^^^ a = a := ISize.toBitVec_inj.1 BitVec.zero_xor
-- XOR with `-1` (all bits set) on the left is bitwise complement.
-- Proof strategy: rewrite into `BitVec` form via `← toBitVec_inj`, then use
-- `BitVec.negOne_eq_allOnes` and `BitVec.allOnes_xor`.
@[simp] theorem Int8.neg_one_xor {a : Int8} : -1 ^^^ a = ~~~a := by
  rw [← Int8.toBitVec_inj, Int8.toBitVec_xor, Int8.toBitVec_neg, Int8.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, Int8.toBitVec_not]
@[simp] theorem Int16.neg_one_xor {a : Int16} : -1 ^^^ a = ~~~a := by
  rw [← Int16.toBitVec_inj, Int16.toBitVec_xor, Int16.toBitVec_neg, Int16.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, Int16.toBitVec_not]
@[simp] theorem Int32.neg_one_xor {a : Int32} : -1 ^^^ a = ~~~a := by
  rw [← Int32.toBitVec_inj, Int32.toBitVec_xor, Int32.toBitVec_neg, Int32.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, Int32.toBitVec_not]
@[simp] theorem Int64.neg_one_xor {a : Int64} : -1 ^^^ a = ~~~a := by
  rw [← Int64.toBitVec_inj, Int64.toBitVec_xor, Int64.toBitVec_neg, Int64.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, Int64.toBitVec_not]
@[simp] theorem ISize.neg_one_xor {a : ISize} : -1 ^^^ a = ~~~a := by
  rw [← ISize.toBitVec_inj, ISize.toBitVec_xor, ISize.toBitVec_neg, ISize.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, ISize.toBitVec_not]
-- XOR with `-1` on the right is also complement, by commutativity and `neg_one_xor`.
@[simp] theorem Int8.xor_neg_one {a : Int8} : a ^^^ -1 = ~~~a := by rw [Int8.xor_comm, neg_one_xor]
@[simp] theorem Int16.xor_neg_one {a : Int16} : a ^^^ -1 = ~~~a := by rw [Int16.xor_comm, neg_one_xor]
@[simp] theorem Int32.xor_neg_one {a : Int32} : a ^^^ -1 = ~~~a := by rw [Int32.xor_comm, neg_one_xor]
@[simp] theorem Int64.xor_neg_one {a : Int64} : a ^^^ -1 = ~~~a := by rw [Int64.xor_comm, neg_one_xor]
@[simp] theorem ISize.xor_neg_one {a : ISize} : a ^^^ -1 = ~~~a := by rw [ISize.xor_comm, neg_one_xor]
-- Register `0` as the (two-sided) identity of `^^^`; only the right-identity field
-- is needed, commutativity supplies the other side.
instance : Std.LawfulCommIdentity (α := Int8) (· ^^^ ·) 0 where
  right_id _ := Int8.xor_zero
instance : Std.LawfulCommIdentity (α := Int16) (· ^^^ ·) 0 where
  right_id _ := Int16.xor_zero
instance : Std.LawfulCommIdentity (α := Int32) (· ^^^ ·) 0 where
  right_id _ := Int32.xor_zero
instance : Std.LawfulCommIdentity (α := Int64) (· ^^^ ·) 0 where
  right_id _ := Int64.xor_zero
instance : Std.LawfulCommIdentity (α := ISize) (· ^^^ ·) 0 where
  right_id _ := ISize.xor_zero
-- `a ^^^ b = 0` iff `a = b`, and XOR is cancellable on either side.
-- All proofs reduce to `BitVec` via the reversed `toBitVec_inj` rewrite.
@[simp] theorem Int8.xor_eq_zero_iff {a b : Int8} : a ^^^ b = 0 ↔ a = b := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.xor_eq_zero_iff {a b : Int16} : a ^^^ b = 0 ↔ a = b := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.xor_eq_zero_iff {a b : Int32} : a ^^^ b = 0 ↔ a = b := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.xor_eq_zero_iff {a b : Int64} : a ^^^ b = 0 ↔ a = b := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.xor_eq_zero_iff {a b : ISize} : a ^^^ b = 0 ↔ a = b := by simp [← ISize.toBitVec_inj]
@[simp] theorem Int8.xor_left_inj {a b : Int8} (c : Int8) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.xor_left_inj {a b : Int16} (c : Int16) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.xor_left_inj {a b : Int32} (c : Int32) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.xor_left_inj {a b : Int64} (c : Int64) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.xor_left_inj {a b : ISize} (c : ISize) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← ISize.toBitVec_inj]
@[simp] theorem Int8.xor_right_inj {a b : Int8} (c : Int8) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.xor_right_inj {a b : Int16} (c : Int16) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.xor_right_inj {a b : Int32} (c : Int32) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.xor_right_inj {a b : Int64} (c : Int64) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.xor_right_inj {a b : ISize} (c : ISize) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← ISize.toBitVec_inj]
-- Complement swaps `0` and `-1`. The fixed-width cases hold definitionally (`rfl`);
-- `ISize` has platform-dependent width, so it goes through `ISize.not_eq_neg_sub` instead.
@[simp] theorem Int8.not_zero : ~~~(0 : Int8) = -1 := rfl
@[simp] theorem Int16.not_zero : ~~~(0 : Int16) = -1 := rfl
@[simp] theorem Int32.not_zero : ~~~(0 : Int32) = -1 := rfl
@[simp] theorem Int64.not_zero : ~~~(0 : Int64) = -1 := rfl
@[simp] theorem ISize.not_zero : ~~~(0 : ISize) = -1 := by simp [ISize.not_eq_neg_sub]
@[simp] theorem Int8.not_neg_one : ~~~(-1 : Int8) = 0 := rfl
@[simp] theorem Int16.not_neg_one : ~~~(-1 : Int16) = 0 := rfl
@[simp] theorem Int32.not_neg_one : ~~~(-1 : Int32) = 0 := rfl
@[simp] theorem Int64.not_neg_one : ~~~(-1 : Int64) = 0 := rfl
@[simp] theorem ISize.not_neg_one : ~~~(-1 : ISize) = 0 := by simp [ISize.not_eq_neg_sub]
-- Complement is an involution and hence injective; `a &&& ~~~a = 0`.
-- All proofs reduce to `BitVec` via the reversed `toBitVec_inj` rewrite.
@[simp] theorem Int8.not_not {a : Int8} : ~~~(~~~a) = a := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.not_not {a : Int16} : ~~~(~~~a) = a := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.not_not {a : Int32} : ~~~(~~~a) = a := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.not_not {a : Int64} : ~~~(~~~a) = a := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.not_not {a : ISize} : ~~~(~~~a) = a := by simp [← ISize.toBitVec_inj]
@[simp] theorem Int8.not_inj {a b : Int8} : ~~~a = ~~~b ↔ a = b := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.not_inj {a b : Int16} : ~~~a = ~~~b ↔ a = b := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.not_inj {a b : Int32} : ~~~a = ~~~b ↔ a = b := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.not_inj {a b : Int64} : ~~~a = ~~~b ↔ a = b := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.not_inj {a b : ISize} : ~~~a = ~~~b ↔ a = b := by simp [← ISize.toBitVec_inj]
@[simp] theorem Int8.and_not_self {a : Int8} : a &&& ~~~a = 0 := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.and_not_self {a : Int16} : a &&& ~~~a = 0 := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.and_not_self {a : Int32} : a &&& ~~~a = 0 := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.and_not_self {a : Int64} : a &&& ~~~a = 0 := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.and_not_self {a : ISize} : a &&& ~~~a = 0 := by simp [← ISize.toBitVec_inj]
-- Mirror of `and_not_self`, obtained by commutativity of `&&&`.
@[simp] theorem Int8.not_and_self {a : Int8} : ~~~a &&& a = 0 := by simp [Int8.and_comm]
@[simp] theorem Int16.not_and_self {a : Int16} : ~~~a &&& a = 0 := by simp [Int16.and_comm]
@[simp] theorem Int32.not_and_self {a : Int32} : ~~~a &&& a = 0 := by simp [Int32.and_comm]
@[simp] theorem Int64.not_and_self {a : Int64} : ~~~a &&& a = 0 := by simp [Int64.and_comm]
@[simp] theorem ISize.not_and_self {a : ISize} : ~~~a &&& a = 0 := by simp [ISize.and_comm]
-- `a ||| ~~~a = -1` (all bits set). Proof strategy: rewrite into `BitVec` form via
-- `← toBitVec_inj`, apply `BitVec.or_not_self`, and identify `-1` with `allOnes`.
@[simp] theorem Int8.or_not_self {a : Int8} : a ||| ~~~a = -1 := by
  rw [← Int8.toBitVec_inj, Int8.toBitVec_or, Int8.toBitVec_not, BitVec.or_not_self,
    Int8.toBitVec_neg, Int8.toBitVec_one, BitVec.negOne_eq_allOnes]
@[simp] theorem Int16.or_not_self {a : Int16} : a ||| ~~~a = -1 := by
  rw [← Int16.toBitVec_inj, Int16.toBitVec_or, Int16.toBitVec_not, BitVec.or_not_self,
    Int16.toBitVec_neg, Int16.toBitVec_one, BitVec.negOne_eq_allOnes]
@[simp] theorem Int32.or_not_self {a : Int32} : a ||| ~~~a = -1 := by
  rw [← Int32.toBitVec_inj, Int32.toBitVec_or, Int32.toBitVec_not, BitVec.or_not_self,
    Int32.toBitVec_neg, Int32.toBitVec_one, BitVec.negOne_eq_allOnes]
@[simp] theorem Int64.or_not_self {a : Int64} : a ||| ~~~a = -1 := by
  rw [← Int64.toBitVec_inj, Int64.toBitVec_or, Int64.toBitVec_not, BitVec.or_not_self,
    Int64.toBitVec_neg, Int64.toBitVec_one, BitVec.negOne_eq_allOnes]
@[simp] theorem ISize.or_not_self {a : ISize} : a ||| ~~~a = -1 := by
  rw [← ISize.toBitVec_inj, ISize.toBitVec_or, ISize.toBitVec_not, BitVec.or_not_self,
    ISize.toBitVec_neg, ISize.toBitVec_one, BitVec.negOne_eq_allOnes]
-- Mirror of `or_not_self`, obtained by commutativity of `|||`.
@[simp] theorem Int8.not_or_self {a : Int8} : ~~~a ||| a = -1 := by simp [Int8.or_comm]
@[simp] theorem Int16.not_or_self {a : Int16} : ~~~a ||| a = -1 := by simp [Int16.or_comm]
@[simp] theorem Int32.not_or_self {a : Int32} : ~~~a ||| a = -1 := by simp [Int32.or_comm]
@[simp] theorem Int64.not_or_self {a : Int64} : ~~~a ||| a = -1 := by simp [Int64.or_comm]
@[simp] theorem ISize.not_or_self {a : ISize} : ~~~a ||| a = -1 := by simp [ISize.or_comm]
-- Complement can be moved across an equality (`~~~a = b ↔ a = ~~~b`), and no value
-- equals its own complement. Reduced to `BitVec` via the reversed `toBitVec_inj` rewrite.
theorem Int8.not_eq_comm {a b : Int8} : ~~~a = b ↔ a = ~~~b := by
  simp [← Int8.toBitVec_inj, BitVec.not_eq_comm]
theorem Int16.not_eq_comm {a b : Int16} : ~~~a = b ↔ a = ~~~b := by
  simp [← Int16.toBitVec_inj, BitVec.not_eq_comm]
theorem Int32.not_eq_comm {a b : Int32} : ~~~a = b ↔ a = ~~~b := by
  simp [← Int32.toBitVec_inj, BitVec.not_eq_comm]
theorem Int64.not_eq_comm {a b : Int64} : ~~~a = b ↔ a = ~~~b := by
  simp [← Int64.toBitVec_inj, BitVec.not_eq_comm]
theorem ISize.not_eq_comm {a b : ISize} : ~~~a = b ↔ a = ~~~b := by
  simp [← ISize.toBitVec_inj, BitVec.not_eq_comm]
@[simp] theorem Int8.ne_not_self {a : Int8} : a ≠ ~~~a := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.ne_not_self {a : Int16} : a ≠ ~~~a := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.ne_not_self {a : Int32} : a ≠ ~~~a := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.ne_not_self {a : Int64} : a ≠ ~~~a := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.ne_not_self {a : ISize} : a ≠ ~~~a := by simp [← ISize.toBitVec_inj]
@[simp] theorem Int8.not_ne_self {a : Int8} : ~~~a ≠ a := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.not_ne_self {a : Int16} : ~~~a ≠ a := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.not_ne_self {a : Int32} : ~~~a ≠ a := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.not_ne_self {a : Int64} : ~~~a ≠ a := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.not_ne_self {a : ISize} : ~~~a ≠ a := by simp [← ISize.toBitVec_inj]
-- Complement commutes out of either side of XOR; shifting left by `0` is the
-- identity and shifting `0` left is `0`. Reduced to `BitVec` via `← toBitVec_inj`.
theorem Int8.not_xor {a b : Int8} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← Int8.toBitVec_inj, BitVec.not_xor_left]
theorem Int16.not_xor {a b : Int16} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← Int16.toBitVec_inj, BitVec.not_xor_left]
theorem Int32.not_xor {a b : Int32} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← Int32.toBitVec_inj, BitVec.not_xor_left]
theorem Int64.not_xor {a b : Int64} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← Int64.toBitVec_inj, BitVec.not_xor_left]
theorem ISize.not_xor {a b : ISize} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← ISize.toBitVec_inj, BitVec.not_xor_left]
theorem Int8.xor_not {a b : Int8} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← Int8.toBitVec_inj, BitVec.not_xor_right]
theorem Int16.xor_not {a b : Int16} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← Int16.toBitVec_inj, BitVec.not_xor_right]
theorem Int32.xor_not {a b : Int32} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← Int32.toBitVec_inj, BitVec.not_xor_right]
theorem Int64.xor_not {a b : Int64} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← Int64.toBitVec_inj, BitVec.not_xor_right]
theorem ISize.xor_not {a b : ISize} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← ISize.toBitVec_inj, BitVec.not_xor_right]
@[simp] theorem Int8.shiftLeft_zero {a : Int8} : a <<< 0 = a := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.shiftLeft_zero {a : Int16} : a <<< 0 = a := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.shiftLeft_zero {a : Int32} : a <<< 0 = a := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.shiftLeft_zero {a : Int64} : a <<< 0 = a := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.shiftLeft_zero {a : ISize} : a <<< 0 = a := by simp [← ISize.toBitVec_inj]
@[simp] theorem Int8.zero_shiftLeft {a : Int8} : 0 <<< a = 0 := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.zero_shiftLeft {a : Int16} : 0 <<< a = 0 := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.zero_shiftLeft {a : Int32} : 0 <<< a = 0 := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.zero_shiftLeft {a : Int64} : 0 <<< a = 0 := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.zero_shiftLeft {a : ISize} : 0 <<< a = 0 := by simp [← ISize.toBitVec_inj]
-- Left shift distributes over the pointwise bitwise operations `^^^`, `&&&`, `|||`.
-- Each proof transports the corresponding `BitVec.shiftLeft_*_distrib` lemma
-- along the reversed `toBitVec_inj` rewrite.
theorem Int8.shiftLeft_xor {a b c : Int8} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← Int8.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
theorem Int16.shiftLeft_xor {a b c : Int16} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← Int16.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
theorem Int32.shiftLeft_xor {a b c : Int32} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← Int32.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
theorem Int64.shiftLeft_xor {a b c : Int64} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← Int64.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
theorem ISize.shiftLeft_xor {a b c : ISize} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← ISize.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
theorem Int8.shiftLeft_and {a b c : Int8} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← Int8.toBitVec_inj, BitVec.shiftLeft_and_distrib]
theorem Int16.shiftLeft_and {a b c : Int16} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← Int16.toBitVec_inj, BitVec.shiftLeft_and_distrib]
theorem Int32.shiftLeft_and {a b c : Int32} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← Int32.toBitVec_inj, BitVec.shiftLeft_and_distrib]
theorem Int64.shiftLeft_and {a b c : Int64} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← Int64.toBitVec_inj, BitVec.shiftLeft_and_distrib]
theorem ISize.shiftLeft_and {a b c : ISize} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← ISize.toBitVec_inj, BitVec.shiftLeft_and_distrib]
theorem Int8.shiftLeft_or {a b c : Int8} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← Int8.toBitVec_inj, BitVec.shiftLeft_or_distrib]
theorem Int16.shiftLeft_or {a b c : Int16} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← Int16.toBitVec_inj, BitVec.shiftLeft_or_distrib]
theorem Int32.shiftLeft_or {a b c : Int32} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← Int32.toBitVec_inj, BitVec.shiftLeft_or_distrib]
theorem Int64.shiftLeft_or {a b c : Int64} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← Int64.toBitVec_inj, BitVec.shiftLeft_or_distrib]
theorem ISize.shiftLeft_or {a b c : ISize} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← ISize.toBitVec_inj, BitVec.shiftLeft_or_distrib]
-- Combining `(-1) <<< b` with `a <<< b`: under `&&&` the other operand survives,
-- under `|||` the `(-1) <<< b` mask survives. Each proof folds the two shifts into one
-- via the reversed distributivity lemma, then simplifies `-1 &&& a` / `-1 ||| a`.
-- NOTE: the `Int64` `or` variant previously bound `{a b : Int8}` by copy-paste error;
-- it is stated for `Int64` here.
@[simp] theorem Int8.neg_one_shiftLeft_and_shiftLeft {a b : Int8} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← Int8.shiftLeft_and]
@[simp] theorem Int16.neg_one_shiftLeft_and_shiftLeft {a b : Int16} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← Int16.shiftLeft_and]
@[simp] theorem Int32.neg_one_shiftLeft_and_shiftLeft {a b : Int32} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← Int32.shiftLeft_and]
@[simp] theorem Int64.neg_one_shiftLeft_and_shiftLeft {a b : Int64} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← Int64.shiftLeft_and]
@[simp] theorem ISize.neg_one_shiftLeft_and_shiftLeft {a b : ISize} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← ISize.shiftLeft_and]
@[simp] theorem Int8.neg_one_shiftLeft_or_shiftLeft {a b : Int8} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← Int8.shiftLeft_or]
@[simp] theorem Int16.neg_one_shiftLeft_or_shiftLeft {a b : Int16} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← Int16.shiftLeft_or]
@[simp] theorem Int32.neg_one_shiftLeft_or_shiftLeft {a b : Int32} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← Int32.shiftLeft_or]
@[simp] theorem Int64.neg_one_shiftLeft_or_shiftLeft {a b : Int64} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← Int64.shiftLeft_or]
@[simp] theorem ISize.neg_one_shiftLeft_or_shiftLeft {a b : ISize} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← ISize.shiftLeft_or]
-- Shifting right by `0` is the identity and shifting `0` right is `0`; arithmetic
-- right shift distributes over `^^^`, `&&&`, `|||` (transported from the
-- `BitVec.sshiftRight_*_distrib` lemmas along the reversed `toBitVec_inj` rewrite).
@[simp] theorem Int8.shiftRight_zero {a : Int8} : a >>> 0 = a := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.shiftRight_zero {a : Int16} : a >>> 0 = a := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.shiftRight_zero {a : Int32} : a >>> 0 = a := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.shiftRight_zero {a : Int64} : a >>> 0 = a := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.shiftRight_zero {a : ISize} : a >>> 0 = a := by simp [← ISize.toBitVec_inj]
@[simp] theorem Int8.zero_shiftRight {a : Int8} : 0 >>> a = 0 := by simp [← Int8.toBitVec_inj]
@[simp] theorem Int16.zero_shiftRight {a : Int16} : 0 >>> a = 0 := by simp [← Int16.toBitVec_inj]
@[simp] theorem Int32.zero_shiftRight {a : Int32} : 0 >>> a = 0 := by simp [← Int32.toBitVec_inj]
@[simp] theorem Int64.zero_shiftRight {a : Int64} : 0 >>> a = 0 := by simp [← Int64.toBitVec_inj]
@[simp] theorem ISize.zero_shiftRight {a : ISize} : 0 >>> a = 0 := by simp [← ISize.toBitVec_inj]
theorem Int8.shiftRight_xor {a b c : Int8} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← Int8.toBitVec_inj, BitVec.sshiftRight_xor_distrib]
theorem Int16.shiftRight_xor {a b c : Int16} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← Int16.toBitVec_inj, BitVec.sshiftRight_xor_distrib]
theorem Int32.shiftRight_xor {a b c : Int32} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← Int32.toBitVec_inj, BitVec.sshiftRight_xor_distrib]
theorem Int64.shiftRight_xor {a b c : Int64} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← Int64.toBitVec_inj, BitVec.sshiftRight_xor_distrib]
theorem ISize.shiftRight_xor {a b c : ISize} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← ISize.toBitVec_inj, BitVec.sshiftRight_xor_distrib]
theorem Int8.shiftRight_and {a b c : Int8} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← Int8.toBitVec_inj, BitVec.sshiftRight_and_distrib]
theorem Int16.shiftRight_and {a b c : Int16} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← Int16.toBitVec_inj, BitVec.sshiftRight_and_distrib]
theorem Int32.shiftRight_and {a b c : Int32} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← Int32.toBitVec_inj, BitVec.sshiftRight_and_distrib]
theorem Int64.shiftRight_and {a b c : Int64} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← Int64.toBitVec_inj, BitVec.sshiftRight_and_distrib]
theorem ISize.shiftRight_and {a b c : ISize} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← ISize.toBitVec_inj, BitVec.sshiftRight_and_distrib]
theorem Int8.shiftRight_or {a b c : Int8} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← Int8.toBitVec_inj, BitVec.sshiftRight_or_distrib]
theorem Int16.shiftRight_or {a b c : Int16} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← Int16.toBitVec_inj, BitVec.sshiftRight_or_distrib]
theorem Int32.shiftRight_or {a b c : Int32} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← Int32.toBitVec_inj, BitVec.sshiftRight_or_distrib]
theorem Int64.shiftRight_or {a b c : Int64} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← Int64.toBitVec_inj, BitVec.sshiftRight_or_distrib]
theorem ISize.shiftRight_or {a b c : ISize} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← ISize.toBitVec_inj, BitVec.sshiftRight_or_distrib]

File diff suppressed because it is too large Load Diff

View File

@@ -1508,20 +1508,12 @@ Checks whether the substring can be interpreted as the decimal representation of
A substring can be interpreted as a decimal natural number if it is not empty and all the characters
in it are digits.
Use `Substring.toNat?` to convert such a substring to a natural number.
Use `Substring.toNat?` to convert such a string to a natural number.
-/
@[inline] def isNat (s : Substring) : Bool :=
s.all fun c => c.isDigit
/--
Checks whether the substring can be interpreted as the decimal representation of a natural number,
returning the number if it can.
A substring can be interpreted as a decimal natural number if it is not empty and all the characters
in it are digits.
Use `Substring.isNat` to check whether the substring is such a substring.
-/
def toNat? (s : Substring) : Option Nat :=
if s.isNat then
some <| s.foldl (fun n c => n*10 + (c.toNat - '0'.toNat)) 0

View File

@@ -11,14 +11,7 @@ open Sum Subtype Nat
open Std
/--
Types that can be converted into a string for display.
There is no expectation that the resulting string can be parsed back to the original data (see
`Repr` for a similar class with this expectation).
-/
class ToString (α : Type u) where
/-- Converts a value into a string. -/
toString : α → String
export ToString (toString)

View File

@@ -27,7 +27,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_add"]
protected def UInt8.add (a b : UInt8) : UInt8 := a.toBitVec + b.toBitVec
def UInt8.add (a b : UInt8) : UInt8 := a.toBitVec + b.toBitVec
/--
Subtracts one 8-bit unsigned integer from another, wrapping around on underflow. Usually accessed
via the `-` operator.
@@ -35,7 +35,7 @@ via the `-` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_sub"]
protected def UInt8.sub (a b : UInt8) : UInt8 := a.toBitVec - b.toBitVec
def UInt8.sub (a b : UInt8) : UInt8 := a.toBitVec - b.toBitVec
/--
Multiplies two 8-bit unsigned integers, wrapping around on overflow. Usually accessed via the `*`
operator.
@@ -43,7 +43,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_mul"]
protected def UInt8.mul (a b : UInt8) : UInt8 := a.toBitVec * b.toBitVec
def UInt8.mul (a b : UInt8) : UInt8 := a.toBitVec * b.toBitVec
/--
Unsigned division for 8-bit unsigned integers, discarding the remainder. Usually accessed
via the `/` operator.
@@ -53,7 +53,7 @@ This operation is sometimes called “floor division.” Division by zero is def
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_div"]
protected def UInt8.div (a b : UInt8) : UInt8 := BitVec.udiv a.toBitVec b.toBitVec
def UInt8.div (a b : UInt8) : UInt8 := BitVec.udiv a.toBitVec b.toBitVec
/--
The modulo operator for 8-bit unsigned integers, which computes the remainder when dividing one
integer by another. Usually accessed via the `%` operator.
@@ -68,10 +68,10 @@ Examples:
* `UInt8.mod 4 0 = 4`
-/
@[extern "lean_uint8_mod"]
protected def UInt8.mod (a b : UInt8) : UInt8 := BitVec.umod a.toBitVec b.toBitVec
def UInt8.mod (a b : UInt8) : UInt8 := BitVec.umod a.toBitVec b.toBitVec
set_option linter.missingDocs false in
@[deprecated UInt8.mod (since := "2024-09-23")]
protected def UInt8.modn (a : UInt8) (n : Nat) : UInt8 := Fin.modn a.toFin n
def UInt8.modn (a : UInt8) (n : Nat) : UInt8 := Fin.modn a.toFin n
/--
Bitwise and for 8-bit unsigned integers. Usually accessed via the `&&&` operator.
@@ -80,7 +80,7 @@ Each bit of the resulting integer is set if the corresponding bits of both input
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_land"]
protected def UInt8.land (a b : UInt8) : UInt8 := a.toBitVec &&& b.toBitVec
def UInt8.land (a b : UInt8) : UInt8 := a.toBitVec &&& b.toBitVec
/--
Bitwise or for 8-bit unsigned integers. Usually accessed via the `|||` operator.
@@ -90,7 +90,7 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_lor"]
protected def UInt8.lor (a b : UInt8) : UInt8 := a.toBitVec ||| b.toBitVec
def UInt8.lor (a b : UInt8) : UInt8 := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for 8-bit unsigned integers. Usually accessed via the `^^^` operator.
@@ -100,31 +100,31 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_xor"]
protected def UInt8.xor (a b : UInt8) : UInt8 := a.toBitVec ^^^ b.toBitVec
def UInt8.xor (a b : UInt8) : UInt8 := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for 8-bit unsigned integers. Usually accessed via the `<<<` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_shift_left"]
protected def UInt8.shiftLeft (a b : UInt8) : UInt8 := a.toBitVec <<< (UInt8.mod b 8).toBitVec
def UInt8.shiftLeft (a b : UInt8) : UInt8 := a.toBitVec <<< (mod b 8).toBitVec
/--
Bitwise right shift for 8-bit unsigned integers. Usually accessed via the `>>>` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_shift_right"]
protected def UInt8.shiftRight (a b : UInt8) : UInt8 := a.toBitVec >>> (UInt8.mod b 8).toBitVec
def UInt8.shiftRight (a b : UInt8) : UInt8 := a.toBitVec >>> (mod b 8).toBitVec
/--
Strict inequality of 8-bit unsigned integers, defined as inequality of the corresponding
natural numbers. Usually accessed via the `<` operator.
-/
protected def UInt8.lt (a b : UInt8) : Prop := a.toBitVec < b.toBitVec
def UInt8.lt (a b : UInt8) : Prop := a.toBitVec < b.toBitVec
/--
Non-strict inequality of 8-bit unsigned integers, defined as inequality of the corresponding
natural numbers. Usually accessed via the `≤` operator.
-/
protected def UInt8.le (a b : UInt8) : Prop := a.toBitVec b.toBitVec
def UInt8.le (a b : UInt8) : Prop := a.toBitVec b.toBitVec
instance : Add UInt8 := UInt8.add
instance : Sub UInt8 := UInt8.sub
@@ -147,7 +147,7 @@ Each bit of the resulting integer is the opposite of the corresponding bit of th
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_complement"]
protected def UInt8.complement (a : UInt8) : UInt8 := ~~~a.toBitVec
def UInt8.complement (a : UInt8) : UInt8 := ~~~a.toBitVec
/--
Negation of 8-bit unsigned integers, computed modulo `UInt8.size`.
@@ -156,7 +156,7 @@ Negation of 8-bit unsigned integers, computed modulo `UInt8.size`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint8_neg"]
protected def UInt8.neg (a : UInt8) : UInt8 := -a.toBitVec
def UInt8.neg (a : UInt8) : UInt8 := -a.toBitVec
instance : Complement UInt8 := UInt8.complement
instance : Neg UInt8 := UInt8.neg
@@ -224,7 +224,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_add"]
protected def UInt16.add (a b : UInt16) : UInt16 := a.toBitVec + b.toBitVec
def UInt16.add (a b : UInt16) : UInt16 := a.toBitVec + b.toBitVec
/--
Subtracts one 16-bit unsigned integer from another, wrapping around on underflow. Usually accessed
via the `-` operator.
@@ -232,7 +232,7 @@ via the `-` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_sub"]
protected def UInt16.sub (a b : UInt16) : UInt16 := a.toBitVec - b.toBitVec
def UInt16.sub (a b : UInt16) : UInt16 := a.toBitVec - b.toBitVec
/--
Multiplies two 16-bit unsigned integers, wrapping around on overflow. Usually accessed via the `*`
operator.
@@ -240,7 +240,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_mul"]
protected def UInt16.mul (a b : UInt16) : UInt16 := a.toBitVec * b.toBitVec
def UInt16.mul (a b : UInt16) : UInt16 := a.toBitVec * b.toBitVec
/--
Unsigned division for 16-bit unsigned integers, discarding the remainder. Usually accessed
via the `/` operator.
@@ -250,7 +250,7 @@ This operation is sometimes called “floor division.” Division by zero is def
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_div"]
protected def UInt16.div (a b : UInt16) : UInt16 := BitVec.udiv a.toBitVec b.toBitVec
def UInt16.div (a b : UInt16) : UInt16 := BitVec.udiv a.toBitVec b.toBitVec
/--
The modulo operator for 16-bit unsigned integers, which computes the remainder when dividing one
integer by another. Usually accessed via the `%` operator.
@@ -265,10 +265,10 @@ Examples:
* `UInt16.mod 4 0 = 4`
-/
@[extern "lean_uint16_mod"]
protected def UInt16.mod (a b : UInt16) : UInt16 := BitVec.umod a.toBitVec b.toBitVec
def UInt16.mod (a b : UInt16) : UInt16 := BitVec.umod a.toBitVec b.toBitVec
set_option linter.missingDocs false in
@[deprecated UInt16.mod (since := "2024-09-23")]
protected def UInt16.modn (a : UInt16) (n : Nat) : UInt16 := Fin.modn a.toFin n
def UInt16.modn (a : UInt16) (n : Nat) : UInt16 := Fin.modn a.toFin n
/--
Bitwise and for 16-bit unsigned integers. Usually accessed via the `&&&` operator.
@@ -277,7 +277,7 @@ Each bit of the resulting integer is set if the corresponding bits of both input
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_land"]
protected def UInt16.land (a b : UInt16) : UInt16 := a.toBitVec &&& b.toBitVec
def UInt16.land (a b : UInt16) : UInt16 := a.toBitVec &&& b.toBitVec
/--
Bitwise or for 16-bit unsigned integers. Usually accessed via the `|||` operator.
@@ -287,7 +287,7 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_lor"]
protected def UInt16.lor (a b : UInt16) : UInt16 := a.toBitVec ||| b.toBitVec
def UInt16.lor (a b : UInt16) : UInt16 := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for 8-bit unsigned integers. Usually accessed via the `^^^` operator.
@@ -297,31 +297,31 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_xor"]
protected def UInt16.xor (a b : UInt16) : UInt16 := a.toBitVec ^^^ b.toBitVec
def UInt16.xor (a b : UInt16) : UInt16 := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for 16-bit unsigned integers. Usually accessed via the `<<<` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_shift_left"]
protected def UInt16.shiftLeft (a b : UInt16) : UInt16 := a.toBitVec <<< (UInt16.mod b 16).toBitVec
def UInt16.shiftLeft (a b : UInt16) : UInt16 := a.toBitVec <<< (mod b 16).toBitVec
/--
Bitwise right shift for 16-bit unsigned integers. Usually accessed via the `>>>` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_shift_right"]
protected def UInt16.shiftRight (a b : UInt16) : UInt16 := a.toBitVec >>> (UInt16.mod b 16).toBitVec
def UInt16.shiftRight (a b : UInt16) : UInt16 := a.toBitVec >>> (mod b 16).toBitVec
/--
Strict inequality of 16-bit unsigned integers, defined as inequality of the corresponding
natural numbers. Usually accessed via the `<` operator.
-/
protected def UInt16.lt (a b : UInt16) : Prop := a.toBitVec < b.toBitVec
def UInt16.lt (a b : UInt16) : Prop := a.toBitVec < b.toBitVec
/--
Non-strict inequality of 16-bit unsigned integers, defined as inequality of the corresponding
natural numbers. Usually accessed via the `≤` operator.
-/
protected def UInt16.le (a b : UInt16) : Prop := a.toBitVec b.toBitVec
def UInt16.le (a b : UInt16) : Prop := a.toBitVec b.toBitVec
instance : Add UInt16 := UInt16.add
instance : Sub UInt16 := UInt16.sub
@@ -344,7 +344,7 @@ Each bit of the resulting integer is the opposite of the corresponding bit of th
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_complement"]
protected def UInt16.complement (a : UInt16) : UInt16 := ~~~a.toBitVec
def UInt16.complement (a : UInt16) : UInt16 := ~~~a.toBitVec
/--
Negation of 16-bit unsigned integers, computed modulo `UInt16.size`.
@@ -353,7 +353,7 @@ Negation of 16-bit unsigned integers, computed modulo `UInt16.size`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint16_neg"]
protected def UInt16.neg (a : UInt16) : UInt16 := -a.toBitVec
def UInt16.neg (a : UInt16) : UInt16 := -a.toBitVec
instance : Complement UInt16 := UInt16.complement
instance : Neg UInt16 := UInt16.neg
@@ -423,7 +423,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_add"]
protected def UInt32.add (a b : UInt32) : UInt32 := a.toBitVec + b.toBitVec
def UInt32.add (a b : UInt32) : UInt32 := a.toBitVec + b.toBitVec
/--
Subtracts one 32-bit unsigned integer from another, wrapping around on underflow. Usually accessed
via the `-` operator.
@@ -431,7 +431,7 @@ via the `-` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_sub"]
protected def UInt32.sub (a b : UInt32) : UInt32 := a.toBitVec - b.toBitVec
def UInt32.sub (a b : UInt32) : UInt32 := a.toBitVec - b.toBitVec
/--
Multiplies two 32-bit unsigned integers, wrapping around on overflow. Usually accessed via the `*`
operator.
@@ -439,7 +439,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_mul"]
protected def UInt32.mul (a b : UInt32) : UInt32 := a.toBitVec * b.toBitVec
def UInt32.mul (a b : UInt32) : UInt32 := a.toBitVec * b.toBitVec
/--
Unsigned division for 32-bit unsigned integers, discarding the remainder. Usually accessed
via the `/` operator.
@@ -449,7 +449,7 @@ This operation is sometimes called “floor division.” Division by zero is def
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_div"]
protected def UInt32.div (a b : UInt32) : UInt32 := BitVec.udiv a.toBitVec b.toBitVec
def UInt32.div (a b : UInt32) : UInt32 := BitVec.udiv a.toBitVec b.toBitVec
/--
The modulo operator for 32-bit unsigned integers, which computes the remainder when dividing one
integer by another. Usually accessed via the `%` operator.
@@ -464,10 +464,10 @@ Examples:
* `UInt32.mod 4 0 = 4`
-/
@[extern "lean_uint32_mod"]
protected def UInt32.mod (a b : UInt32) : UInt32 := BitVec.umod a.toBitVec b.toBitVec
def UInt32.mod (a b : UInt32) : UInt32 := BitVec.umod a.toBitVec b.toBitVec
set_option linter.missingDocs false in
@[deprecated UInt32.mod (since := "2024-09-23")]
protected def UInt32.modn (a : UInt32) (n : Nat) : UInt32 := Fin.modn a.toFin n
def UInt32.modn (a : UInt32) (n : Nat) : UInt32 := Fin.modn a.toFin n
/--
Bitwise and for 32-bit unsigned integers. Usually accessed via the `&&&` operator.
@@ -476,7 +476,7 @@ Each bit of the resulting integer is set if the corresponding bits of both input
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_land"]
protected def UInt32.land (a b : UInt32) : UInt32 := a.toBitVec &&& b.toBitVec
def UInt32.land (a b : UInt32) : UInt32 := a.toBitVec &&& b.toBitVec
/--
Bitwise or for 32-bit unsigned integers. Usually accessed via the `|||` operator.
@@ -486,7 +486,7 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_lor"]
protected def UInt32.lor (a b : UInt32) : UInt32 := a.toBitVec ||| b.toBitVec
def UInt32.lor (a b : UInt32) : UInt32 := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for 32-bit unsigned integers. Usually accessed via the `^^^` operator.
@@ -496,31 +496,31 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_xor"]
protected def UInt32.xor (a b : UInt32) : UInt32 := a.toBitVec ^^^ b.toBitVec
def UInt32.xor (a b : UInt32) : UInt32 := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for 32-bit unsigned integers. Usually accessed via the `<<<` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_shift_left"]
protected def UInt32.shiftLeft (a b : UInt32) : UInt32 := a.toBitVec <<< (UInt32.mod b 32).toBitVec
def UInt32.shiftLeft (a b : UInt32) : UInt32 := a.toBitVec <<< (mod b 32).toBitVec
/--
Bitwise right shift for 32-bit unsigned integers. Usually accessed via the `>>>` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_shift_right"]
protected def UInt32.shiftRight (a b : UInt32) : UInt32 := a.toBitVec >>> (UInt32.mod b 32).toBitVec
def UInt32.shiftRight (a b : UInt32) : UInt32 := a.toBitVec >>> (mod b 32).toBitVec
/--
Strict inequality of 32-bit unsigned integers, defined as inequality of the corresponding
natural numbers. Usually accessed via the `<` operator.
-/
protected def UInt32.lt (a b : UInt32) : Prop := a.toBitVec < b.toBitVec
def UInt32.lt (a b : UInt32) : Prop := a.toBitVec < b.toBitVec
/--
Non-strict inequality of 32-bit unsigned integers, defined as inequality of the corresponding
natural numbers. Usually accessed via the `≤` operator.
-/
protected def UInt32.le (a b : UInt32) : Prop := a.toBitVec b.toBitVec
def UInt32.le (a b : UInt32) : Prop := a.toBitVec b.toBitVec
instance : Add UInt32 := UInt32.add
instance : Sub UInt32 := UInt32.sub
@@ -543,7 +543,7 @@ Each bit of the resulting integer is the opposite of the corresponding bit of th
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_complement"]
protected def UInt32.complement (a : UInt32) : UInt32 := ~~~a.toBitVec
def UInt32.complement (a : UInt32) : UInt32 := ~~~a.toBitVec
/--
Negation of 32-bit unsigned integers, computed modulo `UInt32.size`.
@@ -552,7 +552,7 @@ Negation of 32-bit unsigned integers, computed modulo `UInt32.size`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint32_neg"]
protected def UInt32.neg (a : UInt32) : UInt32 := -a.toBitVec
def UInt32.neg (a : UInt32) : UInt32 := -a.toBitVec
instance : Complement UInt32 := UInt32.complement
instance : Neg UInt32 := UInt32.neg
@@ -584,7 +584,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_add"]
protected def UInt64.add (a b : UInt64) : UInt64 := a.toBitVec + b.toBitVec
def UInt64.add (a b : UInt64) : UInt64 := a.toBitVec + b.toBitVec
/--
Subtracts one 64-bit unsigned integer from another, wrapping around on underflow. Usually accessed
via the `-` operator.
@@ -592,7 +592,7 @@ via the `-` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_sub"]
protected def UInt64.sub (a b : UInt64) : UInt64 := a.toBitVec - b.toBitVec
def UInt64.sub (a b : UInt64) : UInt64 := a.toBitVec - b.toBitVec
/--
Multiplies two 64-bit unsigned integers, wrapping around on overflow. Usually accessed via the `*`
operator.
@@ -600,7 +600,7 @@ operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_mul"]
protected def UInt64.mul (a b : UInt64) : UInt64 := a.toBitVec * b.toBitVec
def UInt64.mul (a b : UInt64) : UInt64 := a.toBitVec * b.toBitVec
/--
Unsigned division for 64-bit unsigned integers, discarding the remainder. Usually accessed
via the `/` operator.
@@ -610,7 +610,7 @@ This operation is sometimes called “floor division.” Division by zero is def
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_div"]
protected def UInt64.div (a b : UInt64) : UInt64 := BitVec.udiv a.toBitVec b.toBitVec
def UInt64.div (a b : UInt64) : UInt64 := BitVec.udiv a.toBitVec b.toBitVec
/--
The modulo operator for 64-bit unsigned integers, which computes the remainder when dividing one
integer by another. Usually accessed via the `%` operator.
@@ -625,10 +625,10 @@ Examples:
* `UInt64.mod 4 0 = 4`
-/
@[extern "lean_uint64_mod"]
protected def UInt64.mod (a b : UInt64) : UInt64 := BitVec.umod a.toBitVec b.toBitVec
def UInt64.mod (a b : UInt64) : UInt64 := BitVec.umod a.toBitVec b.toBitVec
set_option linter.missingDocs false in
@[deprecated UInt64.mod (since := "2024-09-23")]
protected def UInt64.modn (a : UInt64) (n : Nat) : UInt64 := Fin.modn a.toFin n
def UInt64.modn (a : UInt64) (n : Nat) : UInt64 := Fin.modn a.toFin n
/--
Bitwise and for 64-bit unsigned integers. Usually accessed via the `&&&` operator.
@@ -637,7 +637,7 @@ Each bit of the resulting integer is set if the corresponding bits of both input
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_land"]
protected def UInt64.land (a b : UInt64) : UInt64 := a.toBitVec &&& b.toBitVec
def UInt64.land (a b : UInt64) : UInt64 := a.toBitVec &&& b.toBitVec
/--
Bitwise or for 64-bit unsigned integers. Usually accessed via the `|||` operator.
@@ -647,7 +647,7 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_lor"]
protected def UInt64.lor (a b : UInt64) : UInt64 := a.toBitVec ||| b.toBitVec
def UInt64.lor (a b : UInt64) : UInt64 := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for 64-bit unsigned integers. Usually accessed via the `^^^` operator.
@@ -657,31 +657,31 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_xor"]
protected def UInt64.xor (a b : UInt64) : UInt64 := a.toBitVec ^^^ b.toBitVec
def UInt64.xor (a b : UInt64) : UInt64 := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for 64-bit unsigned integers. Usually accessed via the `<<<` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_shift_left"]
protected def UInt64.shiftLeft (a b : UInt64) : UInt64 := a.toBitVec <<< (UInt64.mod b 64).toBitVec
def UInt64.shiftLeft (a b : UInt64) : UInt64 := a.toBitVec <<< (mod b 64).toBitVec
/--
Bitwise right shift for 64-bit unsigned integers. Usually accessed via the `>>>` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_shift_right"]
protected def UInt64.shiftRight (a b : UInt64) : UInt64 := a.toBitVec >>> (UInt64.mod b 64).toBitVec
def UInt64.shiftRight (a b : UInt64) : UInt64 := a.toBitVec >>> (mod b 64).toBitVec
/--
Strict inequality of 64-bit unsigned integers, defined as inequality of the corresponding
natural numbers. Usually accessed via the `<` operator.
-/
protected def UInt64.lt (a b : UInt64) : Prop := a.toBitVec < b.toBitVec
def UInt64.lt (a b : UInt64) : Prop := a.toBitVec < b.toBitVec
/--
Non-strict inequality of 64-bit unsigned integers, defined as inequality of the corresponding
natural numbers. Usually accessed via the `≤` operator.
-/
protected def UInt64.le (a b : UInt64) : Prop := a.toBitVec b.toBitVec
def UInt64.le (a b : UInt64) : Prop := a.toBitVec b.toBitVec
instance : Add UInt64 := UInt64.add
instance : Sub UInt64 := UInt64.sub
@@ -704,7 +704,7 @@ Each bit of the resulting integer is the opposite of the corresponding bit of th
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_complement"]
protected def UInt64.complement (a : UInt64) : UInt64 := ~~~a.toBitVec
def UInt64.complement (a : UInt64) : UInt64 := ~~~a.toBitVec
/--
Negation of 32-bit unsigned integers, computed modulo `UInt64.size`.
@@ -713,7 +713,7 @@ Negation of 32-bit unsigned integers, computed modulo `UInt64.size`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_uint64_neg"]
protected def UInt64.neg (a : UInt64) : UInt64 := -a.toBitVec
def UInt64.neg (a : UInt64) : UInt64 := -a.toBitVec
instance : Complement UInt64 := UInt64.complement
instance : Neg UInt64 := UInt64.neg
@@ -792,7 +792,7 @@ Multiplies two word-sized unsigned integers, wrapping around on overflow. Usual
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_usize_mul"]
protected def USize.mul (a b : USize) : USize := a.toBitVec * b.toBitVec
def USize.mul (a b : USize) : USize := a.toBitVec * b.toBitVec
/--
Unsigned division for word-sized unsigned integers, discarding the remainder. Usually accessed
via the `/` operator.
@@ -802,7 +802,7 @@ This operation is sometimes called “floor division.” Division by zero is def
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_usize_div"]
protected def USize.div (a b : USize) : USize := a.toBitVec / b.toBitVec
def USize.div (a b : USize) : USize := a.toBitVec / b.toBitVec
/--
The modulo operator for word-sized unsigned integers, which computes the remainder when dividing one
integer by another. Usually accessed via the `%` operator.
@@ -817,10 +817,10 @@ Examples:
* `USize.mod 4 0 = 4`
-/
@[extern "lean_usize_mod"]
protected def USize.mod (a b : USize) : USize := a.toBitVec % b.toBitVec
def USize.mod (a b : USize) : USize := a.toBitVec % b.toBitVec
set_option linter.missingDocs false in
@[deprecated USize.mod (since := "2024-09-23")]
protected def USize.modn (a : USize) (n : Nat) : USize := Fin.modn a.toFin n
def USize.modn (a : USize) (n : Nat) : USize := Fin.modn a.toFin n
/--
Bitwise and for word-sized unsigned integers. Usually accessed via the `&&&` operator.
@@ -829,7 +829,7 @@ Each bit of the resulting integer is set if the corresponding bits of both input
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_usize_land"]
protected def USize.land (a b : USize) : USize := a.toBitVec &&& b.toBitVec
def USize.land (a b : USize) : USize := a.toBitVec &&& b.toBitVec
/--
Bitwise or for word-sized unsigned integers. Usually accessed via the `|||` operator.
@@ -839,7 +839,7 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_usize_lor"]
protected def USize.lor (a b : USize) : USize := a.toBitVec ||| b.toBitVec
def USize.lor (a b : USize) : USize := a.toBitVec ||| b.toBitVec
/--
Bitwise exclusive or for word-sized unsigned integers. Usually accessed via the `^^^` operator.
@@ -849,21 +849,21 @@ integers are set.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_usize_xor"]
protected def USize.xor (a b : USize) : USize := a.toBitVec ^^^ b.toBitVec
def USize.xor (a b : USize) : USize := a.toBitVec ^^^ b.toBitVec
/--
Bitwise left shift for word-sized unsigned integers. Usually accessed via the `<<<` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_usize_shift_left"]
protected def USize.shiftLeft (a b : USize) : USize := a.toBitVec <<< (USize.mod b (USize.ofNat System.Platform.numBits)).toBitVec
def USize.shiftLeft (a b : USize) : USize := a.toBitVec <<< (mod b (USize.ofNat System.Platform.numBits)).toBitVec
/--
Bitwise right shift for word-sized unsigned integers. Usually accessed via the `>>>` operator.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_usize_shift_right"]
protected def USize.shiftRight (a b : USize) : USize := a.toBitVec >>> (USize.mod b (USize.ofNat System.Platform.numBits)).toBitVec
def USize.shiftRight (a b : USize) : USize := a.toBitVec >>> (mod b (USize.ofNat System.Platform.numBits)).toBitVec
/--
Converts a natural number to a `USize`. Overflow is impossible on any supported platform because
`USize.size` is either `2^32` or `2^64`.
@@ -953,14 +953,14 @@ Each bit of the resulting integer is the opposite of the corresponding bit of th
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_usize_complement"]
protected def USize.complement (a : USize) : USize := ~~~a.toBitVec
def USize.complement (a : USize) : USize := ~~~a.toBitVec
/--
Negation of word-sized unsigned integers, computed modulo `USize.size`.
This function is overridden at runtime with an efficient implementation.
-/
@[extern "lean_usize_neg"]
protected def USize.neg (a : USize) : USize := -a.toBitVec
def USize.neg (a : USize) : USize := -a.toBitVec
instance : Complement USize := USize.complement
instance : Neg USize := USize.neg

View File

@@ -229,6 +229,7 @@ theorem Bool.toBitVec_toUSize {b : Bool} :
@[simp] theorem USize.toFin_shiftLeft (a b : USize) (hb : b.toNat < System.Platform.numBits) : (a <<< b).toFin = a.toFin <<< b.toFin :=
Fin.val_inj.1 (by simp [Nat.mod_eq_of_lt (a := b.toNat) (b := System.Platform.numBits) hb])
theorem UInt8.shiftLeft_eq_shiftLeft_mod (a b : UInt8) : a <<< b = a <<< (b % 8) := UInt8.toBitVec_inj.1 (by simp)
theorem UInt16.shiftLeft_eq_shiftLeft_mod (a b : UInt16) : a <<< b = a <<< (b % 16) := UInt16.toBitVec_inj.1 (by simp)
theorem UInt32.shiftLeft_eq_shiftLeft_mod (a b : UInt32) : a <<< b = a <<< (b % 32) := UInt32.toBitVec_inj.1 (by simp)
@@ -422,904 +423,3 @@ theorem USize.toUInt64_shiftLeft_of_lt (a b : USize) (hb : b.toNat < System.Plat
There is no reasonable statement for`UInt16.toUInt8_shiftRight`; in fact for `a b : UInt16` the
expression `(a >>> b).toUInt8` is not a function of `a.toUInt8` and `b.toUInt8`.
-/
@[simp] theorem UInt8.ofFin_and (a b : Fin UInt8.size) : UInt8.ofFin (a &&& b) = UInt8.ofFin a &&& UInt8.ofFin b := UInt8.toFin_inj.1 (by simp)
@[simp] theorem UInt16.ofFin_and (a b : Fin UInt16.size) : UInt16.ofFin (a &&& b) = UInt16.ofFin a &&& UInt16.ofFin b := UInt16.toFin_inj.1 (by simp)
@[simp] theorem UInt32.ofFin_and (a b : Fin UInt32.size) : UInt32.ofFin (a &&& b) = UInt32.ofFin a &&& UInt32.ofFin b := UInt32.toFin_inj.1 (by simp)
@[simp] theorem UInt64.ofFin_and (a b : Fin UInt64.size) : UInt64.ofFin (a &&& b) = UInt64.ofFin a &&& UInt64.ofFin b := UInt64.toFin_inj.1 (by simp)
@[simp] theorem USize.ofFin_and (a b : Fin USize.size) : USize.ofFin (a &&& b) = USize.ofFin a &&& USize.ofFin b := USize.toFin_inj.1 (by simp)
@[simp] theorem UInt8.ofBitVec_and (a b : BitVec 8) : UInt8.ofBitVec (a &&& b) = UInt8.ofBitVec a &&& UInt8.ofBitVec b := rfl
@[simp] theorem UInt16.ofBitVec_and (a b : BitVec 16) : UInt16.ofBitVec (a &&& b) = UInt16.ofBitVec a &&& UInt16.ofBitVec b := rfl
@[simp] theorem UInt32.ofBitVec_and (a b : BitVec 32) : UInt32.ofBitVec (a &&& b) = UInt32.ofBitVec a &&& UInt32.ofBitVec b := rfl
@[simp] theorem UInt64.ofBitVec_and (a b : BitVec 64) : UInt64.ofBitVec (a &&& b) = UInt64.ofBitVec a &&& UInt64.ofBitVec b := rfl
@[simp] theorem USize.ofBitVec_and (a b : BitVec System.Platform.numBits) : USize.ofBitVec (a &&& b) = USize.ofBitVec a &&& USize.ofBitVec b := rfl
@[simp] theorem UInt8.ofNat_and (a b : Nat) : UInt8.ofNat (a &&& b) = UInt8.ofNat a &&& UInt8.ofNat b :=
UInt8.toBitVec_inj.1 (by simp [UInt8.toBitVec_ofNat'])
@[simp] theorem UInt16.ofNat_and (a b : Nat) : UInt16.ofNat (a &&& b) = UInt16.ofNat a &&& UInt16.ofNat b :=
UInt16.toBitVec_inj.1 (by simp [UInt16.toBitVec_ofNat'])
@[simp] theorem UInt32.ofNat_and (a b : Nat) : UInt32.ofNat (a &&& b) = UInt32.ofNat a &&& UInt32.ofNat b :=
UInt32.toBitVec_inj.1 (by simp [UInt32.toBitVec_ofNat'])
@[simp] theorem UInt64.ofNat_and (a b : Nat) : UInt64.ofNat (a &&& b) = UInt64.ofNat a &&& UInt64.ofNat b :=
UInt64.toBitVec_inj.1 (by simp [UInt64.toBitVec_ofNat'])
@[simp] theorem USize.ofNat_and (a b : Nat) : USize.ofNat (a &&& b) = USize.ofNat a &&& USize.ofNat b :=
USize.toBitVec_inj.1 (by simp [USize.toBitVec_ofNat'])
@[simp] theorem UInt8.ofNatLT_and (a b : Nat) (ha : a < 2 ^ 8) (hb : b < 2 ^ 8) :
UInt8.ofNatLT (a &&& b) (Nat.and_lt_two_pow _ hb) = UInt8.ofNatLT a ha &&& UInt8.ofNatLT b hb := by
simp [UInt8.ofNatLT_eq_ofNat]
@[simp] theorem UInt16.ofNatLT_and (a b : Nat) (ha : a < 2 ^ 16) (hb : b < 2 ^ 16) :
UInt16.ofNatLT (a &&& b) (Nat.and_lt_two_pow _ hb) = UInt16.ofNatLT a ha &&& UInt16.ofNatLT b hb := by
simp [UInt16.ofNatLT_eq_ofNat]
@[simp] theorem UInt32.ofNatLT_and (a b : Nat) (ha : a < 2 ^ 32) (hb : b < 2 ^ 32) :
UInt32.ofNatLT (a &&& b) (Nat.and_lt_two_pow _ hb) = UInt32.ofNatLT a ha &&& UInt32.ofNatLT b hb := by
simp [UInt32.ofNatLT_eq_ofNat]
@[simp] theorem UInt64.ofNatLT_and (a b : Nat) (ha : a < 2 ^ 64) (hb : b < 2 ^ 64) :
UInt64.ofNatLT (a &&& b) (Nat.and_lt_two_pow _ hb) = UInt64.ofNatLT a ha &&& UInt64.ofNatLT b hb := by
simp [UInt64.ofNatLT_eq_ofNat]
@[simp] theorem UInt8.ofFin_or (a b : Fin UInt8.size) : UInt8.ofFin (a ||| b) = UInt8.ofFin a ||| UInt8.ofFin b := UInt8.toFin_inj.1 (by simp)
@[simp] theorem UInt16.ofFin_or (a b : Fin UInt16.size) : UInt16.ofFin (a ||| b) = UInt16.ofFin a ||| UInt16.ofFin b := UInt16.toFin_inj.1 (by simp)
@[simp] theorem UInt32.ofFin_or (a b : Fin UInt32.size) : UInt32.ofFin (a ||| b) = UInt32.ofFin a ||| UInt32.ofFin b := UInt32.toFin_inj.1 (by simp)
@[simp] theorem UInt64.ofFin_or (a b : Fin UInt64.size) : UInt64.ofFin (a ||| b) = UInt64.ofFin a ||| UInt64.ofFin b := UInt64.toFin_inj.1 (by simp)
@[simp] theorem USize.ofFin_or (a b : Fin USize.size) : USize.ofFin (a ||| b) = USize.ofFin a ||| USize.ofFin b := USize.toFin_inj.1 (by simp)
@[simp] theorem UInt8.ofBitVec_or (a b : BitVec 8) : UInt8.ofBitVec (a ||| b) = UInt8.ofBitVec a ||| UInt8.ofBitVec b := rfl
@[simp] theorem UInt16.ofBitVec_or (a b : BitVec 16) : UInt16.ofBitVec (a ||| b) = UInt16.ofBitVec a ||| UInt16.ofBitVec b := rfl
@[simp] theorem UInt32.ofBitVec_or (a b : BitVec 32) : UInt32.ofBitVec (a ||| b) = UInt32.ofBitVec a ||| UInt32.ofBitVec b := rfl
@[simp] theorem UInt64.ofBitVec_or (a b : BitVec 64) : UInt64.ofBitVec (a ||| b) = UInt64.ofBitVec a ||| UInt64.ofBitVec b := rfl
@[simp] theorem USize.ofBitVec_or (a b : BitVec System.Platform.numBits) : USize.ofBitVec (a ||| b) = USize.ofBitVec a ||| USize.ofBitVec b := rfl
@[simp] theorem UInt8.ofNat_or (a b : Nat) : UInt8.ofNat (a ||| b) = UInt8.ofNat a ||| UInt8.ofNat b :=
UInt8.toBitVec_inj.1 (by simp [UInt8.toBitVec_ofNat'])
@[simp] theorem UInt16.ofNat_or (a b : Nat) : UInt16.ofNat (a ||| b) = UInt16.ofNat a ||| UInt16.ofNat b :=
UInt16.toBitVec_inj.1 (by simp [UInt16.toBitVec_ofNat'])
@[simp] theorem UInt32.ofNat_or (a b : Nat) : UInt32.ofNat (a ||| b) = UInt32.ofNat a ||| UInt32.ofNat b :=
UInt32.toBitVec_inj.1 (by simp [UInt32.toBitVec_ofNat'])
@[simp] theorem UInt64.ofNat_or (a b : Nat) : UInt64.ofNat (a ||| b) = UInt64.ofNat a ||| UInt64.ofNat b :=
UInt64.toBitVec_inj.1 (by simp [UInt64.toBitVec_ofNat'])
@[simp] theorem USize.ofNat_or (a b : Nat) : USize.ofNat (a ||| b) = USize.ofNat a ||| USize.ofNat b :=
USize.toBitVec_inj.1 (by simp [USize.toBitVec_ofNat'])
@[simp] theorem UInt8.ofNatLT_or (a b : Nat) (ha : a < 2 ^ 8) (hb : b < 2 ^ 8) :
UInt8.ofNatLT (a ||| b) (Nat.or_lt_two_pow ha hb) = UInt8.ofNatLT a ha ||| UInt8.ofNatLT b hb := by
simp [UInt8.ofNatLT_eq_ofNat]
@[simp] theorem UInt16.ofNatLT_or (a b : Nat) (ha : a < 2 ^ 16) (hb : b < 2 ^ 16) :
UInt16.ofNatLT (a ||| b) (Nat.or_lt_two_pow ha hb) = UInt16.ofNatLT a ha ||| UInt16.ofNatLT b hb := by
simp [UInt16.ofNatLT_eq_ofNat]
@[simp] theorem UInt32.ofNatLT_or (a b : Nat) (ha : a < 2 ^ 32) (hb : b < 2 ^ 32) :
UInt32.ofNatLT (a ||| b) (Nat.or_lt_two_pow ha hb) = UInt32.ofNatLT a ha ||| UInt32.ofNatLT b hb := by
simp [UInt32.ofNatLT_eq_ofNat]
@[simp] theorem UInt64.ofNatLT_or (a b : Nat) (ha : a < 2 ^ 64) (hb : b < 2 ^ 64) :
UInt64.ofNatLT (a ||| b) (Nat.or_lt_two_pow ha hb) = UInt64.ofNatLT a ha ||| UInt64.ofNatLT b hb := by
simp [UInt64.ofNatLT_eq_ofNat]
@[simp] theorem UInt8.ofFin_xor (a b : Fin UInt8.size) : UInt8.ofFin (a ^^^ b) = UInt8.ofFin a ^^^ UInt8.ofFin b := UInt8.toFin_inj.1 (by simp)
@[simp] theorem UInt16.ofFin_xor (a b : Fin UInt16.size) : UInt16.ofFin (a ^^^ b) = UInt16.ofFin a ^^^ UInt16.ofFin b := UInt16.toFin_inj.1 (by simp)
@[simp] theorem UInt32.ofFin_xor (a b : Fin UInt32.size) : UInt32.ofFin (a ^^^ b) = UInt32.ofFin a ^^^ UInt32.ofFin b := UInt32.toFin_inj.1 (by simp)
@[simp] theorem UInt64.ofFin_xor (a b : Fin UInt64.size) : UInt64.ofFin (a ^^^ b) = UInt64.ofFin a ^^^ UInt64.ofFin b := UInt64.toFin_inj.1 (by simp)
@[simp] theorem USize.ofFin_xor (a b : Fin USize.size) : USize.ofFin (a ^^^ b) = USize.ofFin a ^^^ USize.ofFin b := USize.toFin_inj.1 (by simp)
@[simp] theorem UInt8.ofBitVec_xor (a b : BitVec 8) : UInt8.ofBitVec (a ^^^ b) = UInt8.ofBitVec a ^^^ UInt8.ofBitVec b := rfl
@[simp] theorem UInt16.ofBitVec_xor (a b : BitVec 16) : UInt16.ofBitVec (a ^^^ b) = UInt16.ofBitVec a ^^^ UInt16.ofBitVec b := rfl
@[simp] theorem UInt32.ofBitVec_xor (a b : BitVec 32) : UInt32.ofBitVec (a ^^^ b) = UInt32.ofBitVec a ^^^ UInt32.ofBitVec b := rfl
@[simp] theorem UInt64.ofBitVec_xor (a b : BitVec 64) : UInt64.ofBitVec (a ^^^ b) = UInt64.ofBitVec a ^^^ UInt64.ofBitVec b := rfl
@[simp] theorem USize.ofBitVec_xor (a b : BitVec System.Platform.numBits) : USize.ofBitVec (a ^^^ b) = USize.ofBitVec a ^^^ USize.ofBitVec b := rfl
@[simp] theorem UInt8.ofNat_xor (a b : Nat) : UInt8.ofNat (a ^^^ b) = UInt8.ofNat a ^^^ UInt8.ofNat b :=
UInt8.toBitVec_inj.1 (by simp [UInt8.toBitVec_ofNat'])
@[simp] theorem UInt16.ofNat_xor (a b : Nat) : UInt16.ofNat (a ^^^ b) = UInt16.ofNat a ^^^ UInt16.ofNat b :=
UInt16.toBitVec_inj.1 (by simp [UInt16.toBitVec_ofNat'])
@[simp] theorem UInt32.ofNat_xor (a b : Nat) : UInt32.ofNat (a ^^^ b) = UInt32.ofNat a ^^^ UInt32.ofNat b :=
UInt32.toBitVec_inj.1 (by simp [UInt32.toBitVec_ofNat'])
@[simp] theorem UInt64.ofNat_xor (a b : Nat) : UInt64.ofNat (a ^^^ b) = UInt64.ofNat a ^^^ UInt64.ofNat b :=
UInt64.toBitVec_inj.1 (by simp [UInt64.toBitVec_ofNat'])
@[simp] theorem USize.ofNat_xor (a b : Nat) : USize.ofNat (a ^^^ b) = USize.ofNat a ^^^ USize.ofNat b :=
USize.toBitVec_inj.1 (by simp [USize.toBitVec_ofNat'])
@[simp] theorem UInt8.ofNatLT_xor (a b : Nat) (ha : a < 2 ^ 8) (hb : b < 2 ^ 8) :
UInt8.ofNatLT (a ^^^ b) (Nat.xor_lt_two_pow ha hb) = UInt8.ofNatLT a ha ^^^ UInt8.ofNatLT b hb := by
simp [UInt8.ofNatLT_eq_ofNat]
@[simp] theorem UInt16.ofNatLT_xor (a b : Nat) (ha : a < 2 ^ 16) (hb : b < 2 ^ 16) :
UInt16.ofNatLT (a ^^^ b) (Nat.xor_lt_two_pow ha hb) = UInt16.ofNatLT a ha ^^^ UInt16.ofNatLT b hb := by
simp [UInt16.ofNatLT_eq_ofNat]
@[simp] theorem UInt32.ofNatLT_xor (a b : Nat) (ha : a < 2 ^ 32) (hb : b < 2 ^ 32) :
UInt32.ofNatLT (a ^^^ b) (Nat.xor_lt_two_pow ha hb) = UInt32.ofNatLT a ha ^^^ UInt32.ofNatLT b hb := by
simp [UInt32.ofNatLT_eq_ofNat]
@[simp] theorem UInt64.ofNatLT_xor (a b : Nat) (ha : a < 2 ^ 64) (hb : b < 2 ^ 64) :
UInt64.ofNatLT (a ^^^ b) (Nat.xor_lt_two_pow ha hb) = UInt64.ofNatLT a ha ^^^ UInt64.ofNatLT b hb := by
simp [UInt64.ofNatLT_eq_ofNat]
@[simp] theorem UInt8.ofBitVec_not (a : BitVec 8) : UInt8.ofBitVec (~~~a) = ~~~UInt8.ofBitVec a := rfl
@[simp] theorem UInt16.ofBitVec_not (a : BitVec 16) : UInt16.ofBitVec (~~~a) = ~~~UInt16.ofBitVec a := rfl
@[simp] theorem UInt32.ofBitVec_not (a : BitVec 32) : UInt32.ofBitVec (~~~a) = ~~~UInt32.ofBitVec a := rfl
@[simp] theorem UInt64.ofBitVec_not (a : BitVec 64) : UInt64.ofBitVec (~~~a) = ~~~UInt64.ofBitVec a := rfl
@[simp] theorem USize.ofBitVec_not (a : BitVec System.Platform.numBits) : USize.ofBitVec (~~~a) = ~~~USize.ofBitVec a := rfl
@[simp] theorem UInt8.ofFin_rev (a : Fin UInt8.size) : UInt8.ofFin a.rev = ~~~UInt8.ofFin a := UInt8.toFin_inj.1 (by simp)
@[simp] theorem UInt16.ofFin_rev (a : Fin UInt16.size) : UInt16.ofFin a.rev = ~~~UInt16.ofFin a := UInt16.toFin_inj.1 (by simp)
@[simp] theorem UInt32.ofFin_rev (a : Fin UInt32.size) : UInt32.ofFin a.rev = ~~~UInt32.ofFin a := UInt32.toFin_inj.1 (by simp)
@[simp] theorem UInt64.ofFin_rev (a : Fin UInt64.size) : UInt64.ofFin a.rev = ~~~UInt64.ofFin a := UInt64.toFin_inj.1 (by simp)
@[simp] theorem USize.ofFin_rev (a : Fin USize.size) : USize.ofFin a.rev = ~~~USize.ofFin a := USize.toFin_inj.1 (by simp)
-- `ofBitVec` commutes with left shift by a `Nat` shift amount that is in range
-- for the word width (hypothesis `hb`).
@[simp] theorem UInt8.ofBitVec_shiftLeft (a : BitVec 8) (b : Nat) (hb : b < 8) : UInt8.ofBitVec (a <<< b) = UInt8.ofBitVec a <<< UInt8.ofNat b :=
UInt8.toBitVec_inj.1 (by simp [Nat.mod_eq_of_lt hb])
@[simp] theorem UInt16.ofBitVec_shiftLeft (a : BitVec 16) (b : Nat) (hb : b < 16) : UInt16.ofBitVec (a <<< b) = UInt16.ofBitVec a <<< UInt16.ofNat b :=
UInt16.toBitVec_inj.1 (by simp [Nat.mod_eq_of_lt hb])
@[simp] theorem UInt32.ofBitVec_shiftLeft (a : BitVec 32) (b : Nat) (hb : b < 32) : UInt32.ofBitVec (a <<< b) = UInt32.ofBitVec a <<< UInt32.ofNat b :=
UInt32.toBitVec_inj.1 (by simp [Nat.mod_eq_of_lt hb])
@[simp] theorem UInt64.ofBitVec_shiftLeft (a : BitVec 64) (b : Nat) (hb : b < 64) : UInt64.ofBitVec (a <<< b) = UInt64.ofBitVec a <<< UInt64.ofNat b :=
UInt64.toBitVec_inj.1 (by simp [Nat.mod_eq_of_lt hb])
-- The `USize` case needs an explicit case split because `System.Platform.numBits`
-- is only known to be 32 or 64 (via `System.Platform.numBits_eq`), not a literal.
@[simp] theorem USize.ofBitVec_shiftLeft (a : BitVec System.Platform.numBits) (b : Nat) (hb : b < System.Platform.numBits) :
USize.ofBitVec (a <<< b) = USize.ofBitVec a <<< USize.ofNat b := by
apply USize.toBitVec_inj.1
simp only [USize.toBitVec_shiftLeft, BitVec.natCast_eq_ofNat, BitVec.shiftLeft_eq',
BitVec.toNat_umod, toNat_toBitVec, toNat_ofNat', BitVec.toNat_ofNat, Nat.mod_two_pow_self]
rw [Nat.mod_mod_of_dvd _ (by cases System.Platform.numBits_eq <;> simp_all), Nat.mod_eq_of_lt hb]
-- Variant of `ofBitVec_shiftLeft` with the shift amount reduced modulo the word
-- width, so no side condition on `b` is needed.
@[simp] theorem UInt8.ofBitVec_shiftLeft_mod (a : BitVec 8) (b : Nat) : UInt8.ofBitVec (a <<< (b % 8)) = UInt8.ofBitVec a <<< UInt8.ofNat b :=
UInt8.toBitVec_inj.1 (by simp)
@[simp] theorem UInt16.ofBitVec_shiftLeft_mod (a : BitVec 16) (b : Nat) : UInt16.ofBitVec (a <<< (b % 16)) = UInt16.ofBitVec a <<< UInt16.ofNat b :=
UInt16.toBitVec_inj.1 (by simp)
@[simp] theorem UInt32.ofBitVec_shiftLeft_mod (a : BitVec 32) (b : Nat) : UInt32.ofBitVec (a <<< (b % 32)) = UInt32.ofBitVec a <<< UInt32.ofNat b :=
UInt32.toBitVec_inj.1 (by simp)
@[simp] theorem UInt64.ofBitVec_shiftLeft_mod (a : BitVec 64) (b : Nat) : UInt64.ofBitVec (a <<< (b % 64)) = UInt64.ofBitVec a <<< UInt64.ofNat b :=
UInt64.toBitVec_inj.1 (by simp)
-- As above, the platform-word case requires a case split on `numBits_eq`.
@[simp] theorem USize.ofBitVec_shiftLeft_mod (a : BitVec System.Platform.numBits) (b : Nat) :
USize.ofBitVec (a <<< (b % System.Platform.numBits)) = USize.ofBitVec a <<< USize.ofNat b := by
apply USize.toBitVec_inj.1
simp only [USize.toBitVec_shiftLeft, BitVec.natCast_eq_ofNat, BitVec.shiftLeft_eq',
BitVec.toNat_umod, toNat_toBitVec, toNat_ofNat', BitVec.toNat_ofNat, Nat.mod_two_pow_self]
rw [Nat.mod_mod_of_dvd _ (by cases System.Platform.numBits_eq <;> simp_all)]
-- `ofFin` commutes with left shift of `Fin` values when the shift amount is in
-- range for the word width.
@[simp] theorem UInt8.ofFin_shiftLeft (a b : Fin UInt8.size) (hb : b < 8) : UInt8.ofFin (a <<< b) = UInt8.ofFin a <<< UInt8.ofFin b :=
UInt8.toFin_inj.1 (by simp [UInt8.toFin_shiftLeft (ofFin a) (ofFin b) hb])
@[simp] theorem UInt16.ofFin_shiftLeft (a b : Fin UInt16.size) (hb : b < 16) : UInt16.ofFin (a <<< b) = UInt16.ofFin a <<< UInt16.ofFin b :=
UInt16.toFin_inj.1 (by simp [UInt16.toFin_shiftLeft (ofFin a) (ofFin b) hb])
@[simp] theorem UInt32.ofFin_shiftLeft (a b : Fin UInt32.size) (hb : b < 32) : UInt32.ofFin (a <<< b) = UInt32.ofFin a <<< UInt32.ofFin b :=
UInt32.toFin_inj.1 (by simp [UInt32.toFin_shiftLeft (ofFin a) (ofFin b) hb])
@[simp] theorem UInt64.ofFin_shiftLeft (a b : Fin UInt64.size) (hb : b < 64) : UInt64.ofFin (a <<< b) = UInt64.ofFin a <<< UInt64.ofFin b :=
UInt64.toFin_inj.1 (by simp [UInt64.toFin_shiftLeft (ofFin a) (ofFin b) hb])
@[simp] theorem USize.ofFin_shiftLeft (a b : Fin USize.size) (hb : b < System.Platform.numBits) : USize.ofFin (a <<< b) = USize.ofFin a <<< USize.ofFin b :=
USize.toFin_inj.1 (by simp [USize.toFin_shiftLeft (ofFin a) (ofFin b) hb])
-- Variant of `ofFin_shiftLeft` with the shift amount reduced modulo the word
-- width inside `Fin`, so no range hypothesis is needed.
@[simp] theorem UInt8.ofFin_shiftLeft_mod (a b : Fin UInt8.size) : UInt8.ofFin (a <<< (b % 8)) = UInt8.ofFin a <<< UInt8.ofFin b :=
UInt8.toNat_inj.1 (by simp; rfl)
@[simp] theorem UInt16.ofFin_shiftLeft_mod (a b : Fin UInt16.size) : UInt16.ofFin (a <<< (b % 16)) = UInt16.ofFin a <<< UInt16.ofFin b :=
UInt16.toNat_inj.1 (by simp; rfl)
@[simp] theorem UInt32.ofFin_shiftLeft_mod (a b : Fin UInt32.size) : UInt32.ofFin (a <<< (b % 32)) = UInt32.ofFin a <<< UInt32.ofFin b :=
UInt32.toNat_inj.1 (by simp; rfl)
@[simp] theorem UInt64.ofFin_shiftLeft_mod (a b : Fin UInt64.size) : UInt64.ofFin (a <<< (b % 64)) = UInt64.ofFin a <<< UInt64.ofFin b :=
UInt64.toNat_inj.1 (by simp; rfl)
/-- `ofFin` commutes with left shift when the shift amount is first reduced modulo
the platform word size. The anonymous constructor packages `System.Platform.numBits`
as a `Fin USize.size`; its proof obligation is discharged by casing on `numBits_eq`.
NOTE(review): the source rendering stripped the `⟨…⟩` brackets and garbled the final
rewrite step; reconstructed here — confirm against the original file. -/
@[simp] theorem USize.ofFin_shiftLeft_mod (a b : Fin USize.size) :
    USize.ofFin (a <<< (b % ⟨System.Platform.numBits, by cases System.Platform.numBits_eq <;> simp_all [USize.size]⟩)) = USize.ofFin a <<< USize.ofFin b := by
  apply USize.toFin_inj.1
  -- The trailing `_` leaves the `< System.Platform.numBits` side goal,
  -- which the last two lines discharge via `Nat.mod_lt`.
  rw [toFin_ofFin, USize.shiftLeft_eq_shiftLeft_mod, USize.toFin_shiftLeft, toFin_ofFin, USize.toFin_mod,
    toFin_ofFin, toFin_ofNat', Fin.ofNat'_val_eq_self System.Platform.numBits _]
  rw [USize.toNat_mod, toNat_ofNat']
  cases System.Platform.numBits_eq <;> simpa [*] using Nat.mod_lt _ (by decide)
-- `ofNat` commutes with left shift: the truncating conversion distributes over
-- `<<<` as long as the shift amount is below the word width.
@[simp] theorem UInt8.ofNat_shiftLeft (a b : Nat) (hb : b < 8) :
UInt8.ofNat (a <<< b) = UInt8.ofNat a <<< UInt8.ofNat b := by
rw [UInt8.ofNat_eq_iff_mod_eq_toNat, UInt8.toNat_shiftLeft, toNat_ofNat', toNat_ofNat',
Nat.mod_mod_of_dvd _ (by decide), Nat.mod_eq_of_lt hb, Nat.mod_two_pow_shiftLeft_mod_two_pow]
@[simp] theorem UInt16.ofNat_shiftLeft (a b : Nat) (hb : b < 16) :
UInt16.ofNat (a <<< b) = UInt16.ofNat a <<< UInt16.ofNat b := by
rw [UInt16.ofNat_eq_iff_mod_eq_toNat, UInt16.toNat_shiftLeft, toNat_ofNat', toNat_ofNat',
Nat.mod_mod_of_dvd _ (by decide), Nat.mod_eq_of_lt hb, Nat.mod_two_pow_shiftLeft_mod_two_pow]
@[simp] theorem UInt32.ofNat_shiftLeft (a b : Nat) (hb : b < 32) :
UInt32.ofNat (a <<< b) = UInt32.ofNat a <<< UInt32.ofNat b := by
rw [UInt32.ofNat_eq_iff_mod_eq_toNat, UInt32.toNat_shiftLeft, toNat_ofNat', toNat_ofNat',
Nat.mod_mod_of_dvd _ (by decide), Nat.mod_eq_of_lt hb, Nat.mod_two_pow_shiftLeft_mod_two_pow]
@[simp] theorem UInt64.ofNat_shiftLeft (a b : Nat) (hb : b < 64) :
UInt64.ofNat (a <<< b) = UInt64.ofNat a <<< UInt64.ofNat b := by
rw [UInt64.ofNat_eq_iff_mod_eq_toNat, UInt64.toNat_shiftLeft, toNat_ofNat', toNat_ofNat',
Nat.mod_mod_of_dvd _ (by decide), Nat.mod_eq_of_lt hb, Nat.mod_two_pow_shiftLeft_mod_two_pow]
-- For `USize` the divisibility argument to `Nat.mod_mod_of_dvd` is left as `_`,
-- producing a side goal closed by the final `cases`/`simp_all` line.
@[simp] theorem USize.ofNat_shiftLeft (a b : Nat) (hb : b < System.Platform.numBits) :
USize.ofNat (a <<< b) = USize.ofNat a <<< USize.ofNat b := by
rw [USize.ofNat_eq_iff_mod_eq_toNat, USize.toNat_shiftLeft, toNat_ofNat', toNat_ofNat',
Nat.mod_mod_of_dvd _ _, Nat.mod_eq_of_lt hb, Nat.mod_two_pow_shiftLeft_mod_two_pow]
cases System.Platform.numBits_eq <;> simp_all
-- `ofNatLT` (the checked conversion) commutes with left shift; the range proofs
-- for the operands are derived from the range proof of the shifted result.
@[simp] theorem UInt8.ofNatLT_shiftLeft {a b : Nat} (ha : a <<< b < UInt8.size) (hb : b < 8) :
UInt8.ofNatLT (a <<< b) ha = UInt8.ofNatLT a (Nat.lt_of_shiftLeft_lt ha) <<< UInt8.ofNatLT b (Nat.lt_trans hb (by decide)) := by
simp [UInt8.ofNatLT_eq_ofNat, UInt8.ofNat_shiftLeft a b hb]
@[simp] theorem UInt16.ofNatLT_shiftLeft {a b : Nat} (ha : a <<< b < UInt16.size) (hb : b < 16) :
UInt16.ofNatLT (a <<< b) ha = UInt16.ofNatLT a (Nat.lt_of_shiftLeft_lt ha) <<< UInt16.ofNatLT b (Nat.lt_trans hb (by decide)) := by
simp [UInt16.ofNatLT_eq_ofNat, UInt16.ofNat_shiftLeft a b hb]
@[simp] theorem UInt32.ofNatLT_shiftLeft {a b : Nat} (ha : a <<< b < UInt32.size) (hb : b < 32) :
UInt32.ofNatLT (a <<< b) ha = UInt32.ofNatLT a (Nat.lt_of_shiftLeft_lt ha) <<< UInt32.ofNatLT b (Nat.lt_trans hb (by decide)) := by
simp [UInt32.ofNatLT_eq_ofNat, UInt32.ofNat_shiftLeft a b hb]
@[simp] theorem UInt64.ofNatLT_shiftLeft {a b : Nat} (ha : a <<< b < UInt64.size) (hb : b < 64) :
UInt64.ofNatLT (a <<< b) ha = UInt64.ofNatLT a (Nat.lt_of_shiftLeft_lt ha) <<< UInt64.ofNatLT b (Nat.lt_trans hb (by decide)) := by
simp [UInt64.ofNatLT_eq_ofNat, UInt64.ofNat_shiftLeft a b hb]
-- `USize` uses `Nat.lt_two_pow_self` instead of `decide`, since the size bound
-- `numBits < 2 ^ numBits` holds uniformly for either platform width.
@[simp] theorem USize.ofNatLT_shiftLeft {a b : Nat} (ha : a <<< b < USize.size) (hb : b < System.Platform.numBits) :
USize.ofNatLT (a <<< b) ha = USize.ofNatLT a (Nat.lt_of_shiftLeft_lt ha) <<< USize.ofNatLT b (Nat.lt_trans hb Nat.lt_two_pow_self) := by
simp [USize.ofNatLT_eq_ofNat, USize.ofNat_shiftLeft a b hb]
-- `ofBitVec` commutes with (logical) right shift by an in-range `Nat` amount;
-- mirrors `ofBitVec_shiftLeft` above.
@[simp] theorem UInt8.ofBitVec_shiftRight (a : BitVec 8) (b : Nat) (hb : b < 8) : UInt8.ofBitVec (a >>> b) = UInt8.ofBitVec a >>> UInt8.ofNat b :=
UInt8.toBitVec_inj.1 (by simp [Nat.mod_eq_of_lt hb])
@[simp] theorem UInt16.ofBitVec_shiftRight (a : BitVec 16) (b : Nat) (hb : b < 16) : UInt16.ofBitVec (a >>> b) = UInt16.ofBitVec a >>> UInt16.ofNat b :=
UInt16.toBitVec_inj.1 (by simp [Nat.mod_eq_of_lt hb])
@[simp] theorem UInt32.ofBitVec_shiftRight (a : BitVec 32) (b : Nat) (hb : b < 32) : UInt32.ofBitVec (a >>> b) = UInt32.ofBitVec a >>> UInt32.ofNat b :=
UInt32.toBitVec_inj.1 (by simp [Nat.mod_eq_of_lt hb])
@[simp] theorem UInt64.ofBitVec_shiftRight (a : BitVec 64) (b : Nat) (hb : b < 64) : UInt64.ofBitVec (a >>> b) = UInt64.ofBitVec a >>> UInt64.ofNat b :=
UInt64.toBitVec_inj.1 (by simp [Nat.mod_eq_of_lt hb])
-- Platform-word case: split on `numBits_eq` since the width is not a literal.
@[simp] theorem USize.ofBitVec_shiftRight (a : BitVec System.Platform.numBits) (b : Nat) (hb : b < System.Platform.numBits) :
USize.ofBitVec (a >>> b) = USize.ofBitVec a >>> USize.ofNat b := by
apply USize.toBitVec_inj.1
simp only [USize.toBitVec_shiftRight, BitVec.natCast_eq_ofNat, BitVec.ushiftRight_eq',
BitVec.toNat_umod, toNat_toBitVec, toNat_ofNat', BitVec.toNat_ofNat, Nat.mod_two_pow_self]
rw [Nat.mod_mod_of_dvd _ (by cases System.Platform.numBits_eq <;> simp_all), Nat.mod_eq_of_lt hb]
-- Variant of `ofBitVec_shiftRight` with the shift amount reduced modulo the
-- word width, removing the range hypothesis.
@[simp] theorem UInt8.ofBitVec_shiftRight_mod (a : BitVec 8) (b : Nat) : UInt8.ofBitVec (a >>> (b % 8)) = UInt8.ofBitVec a >>> UInt8.ofNat b :=
UInt8.toBitVec_inj.1 (by simp)
@[simp] theorem UInt16.ofBitVec_shiftRight_mod (a : BitVec 16) (b : Nat) : UInt16.ofBitVec (a >>> (b % 16)) = UInt16.ofBitVec a >>> UInt16.ofNat b :=
UInt16.toBitVec_inj.1 (by simp)
@[simp] theorem UInt32.ofBitVec_shiftRight_mod (a : BitVec 32) (b : Nat) : UInt32.ofBitVec (a >>> (b % 32)) = UInt32.ofBitVec a >>> UInt32.ofNat b :=
UInt32.toBitVec_inj.1 (by simp)
@[simp] theorem UInt64.ofBitVec_shiftRight_mod (a : BitVec 64) (b : Nat) : UInt64.ofBitVec (a >>> (b % 64)) = UInt64.ofBitVec a >>> UInt64.ofNat b :=
UInt64.toBitVec_inj.1 (by simp)
-- Platform-word case: split on `numBits_eq`.
@[simp] theorem USize.ofBitVec_shiftRight_mod (a : BitVec System.Platform.numBits) (b : Nat) :
USize.ofBitVec (a >>> (b % System.Platform.numBits)) = USize.ofBitVec a >>> USize.ofNat b := by
apply USize.toBitVec_inj.1
simp only [USize.toBitVec_shiftRight, BitVec.natCast_eq_ofNat, BitVec.ushiftRight_eq',
BitVec.toNat_umod, toNat_toBitVec, toNat_ofNat', BitVec.toNat_ofNat, Nat.mod_two_pow_self]
rw [Nat.mod_mod_of_dvd _ (by cases System.Platform.numBits_eq <;> simp_all)]
-- `ofFin` commutes with right shift of `Fin` values for in-range shift amounts;
-- mirrors `ofFin_shiftLeft` above.
@[simp] theorem UInt8.ofFin_shiftRight (a b : Fin UInt8.size) (hb : b < 8) : UInt8.ofFin (a >>> b) = UInt8.ofFin a >>> UInt8.ofFin b :=
UInt8.toFin_inj.1 (by simp [UInt8.toFin_shiftRight (ofFin a) (ofFin b) hb])
@[simp] theorem UInt16.ofFin_shiftRight (a b : Fin UInt16.size) (hb : b < 16) : UInt16.ofFin (a >>> b) = UInt16.ofFin a >>> UInt16.ofFin b :=
UInt16.toFin_inj.1 (by simp [UInt16.toFin_shiftRight (ofFin a) (ofFin b) hb])
@[simp] theorem UInt32.ofFin_shiftRight (a b : Fin UInt32.size) (hb : b < 32) : UInt32.ofFin (a >>> b) = UInt32.ofFin a >>> UInt32.ofFin b :=
UInt32.toFin_inj.1 (by simp [UInt32.toFin_shiftRight (ofFin a) (ofFin b) hb])
@[simp] theorem UInt64.ofFin_shiftRight (a b : Fin UInt64.size) (hb : b < 64) : UInt64.ofFin (a >>> b) = UInt64.ofFin a >>> UInt64.ofFin b :=
UInt64.toFin_inj.1 (by simp [UInt64.toFin_shiftRight (ofFin a) (ofFin b) hb])
@[simp] theorem USize.ofFin_shiftRight (a b : Fin USize.size) (hb : b < System.Platform.numBits) : USize.ofFin (a >>> b) = USize.ofFin a >>> USize.ofFin b :=
USize.toFin_inj.1 (by simp [USize.toFin_shiftRight (ofFin a) (ofFin b) hb])
-- Variant of `ofFin_shiftRight` with the shift amount reduced modulo the word
-- width inside `Fin`, so no range hypothesis is needed.
@[simp] theorem UInt8.ofFin_shiftRight_mod (a b : Fin UInt8.size) : UInt8.ofFin (a >>> (b % 8)) = UInt8.ofFin a >>> UInt8.ofFin b :=
UInt8.toNat_inj.1 (by simp; rfl)
@[simp] theorem UInt16.ofFin_shiftRight_mod (a b : Fin UInt16.size) : UInt16.ofFin (a >>> (b % 16)) = UInt16.ofFin a >>> UInt16.ofFin b :=
UInt16.toNat_inj.1 (by simp; rfl)
@[simp] theorem UInt32.ofFin_shiftRight_mod (a b : Fin UInt32.size) : UInt32.ofFin (a >>> (b % 32)) = UInt32.ofFin a >>> UInt32.ofFin b :=
UInt32.toNat_inj.1 (by simp; rfl)
@[simp] theorem UInt64.ofFin_shiftRight_mod (a b : Fin UInt64.size) : UInt64.ofFin (a >>> (b % 64)) = UInt64.ofFin a >>> UInt64.ofFin b :=
UInt64.toNat_inj.1 (by simp; rfl)
/-- `ofFin` commutes with right shift when the shift amount is first reduced modulo
the platform word size; mirrors `USize.ofFin_shiftLeft_mod`.
NOTE(review): the source rendering stripped the `⟨…⟩` brackets and garbled the final
rewrite step; reconstructed here — confirm against the original file. -/
@[simp] theorem USize.ofFin_shiftRight_mod (a b : Fin USize.size) :
    USize.ofFin (a >>> (b % ⟨System.Platform.numBits, by cases System.Platform.numBits_eq <;> simp_all [USize.size]⟩)) = USize.ofFin a >>> USize.ofFin b := by
  apply USize.toFin_inj.1
  -- The trailing `_` leaves the `< System.Platform.numBits` side goal,
  -- which the last two lines discharge via `Nat.mod_lt`.
  rw [toFin_ofFin, USize.shiftRight_eq_shiftRight_mod, USize.toFin_shiftRight, toFin_ofFin, USize.toFin_mod,
    toFin_ofFin, toFin_ofNat', Fin.ofNat'_val_eq_self System.Platform.numBits _]
  rw [USize.toNat_mod, toNat_ofNat']
  cases System.Platform.numBits_eq <;> simpa [*] using Nat.mod_lt _ (by decide)
-- Two's-complement identities: `-a = ~~~a + 1` and its rearrangement
-- `~~~a = -a - 1`, lifted from the corresponding `BitVec` lemmas.
theorem UInt8.neg_eq_not_add (a : UInt8) : -a = ~~~a + 1 := UInt8.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem UInt16.neg_eq_not_add (a : UInt16) : -a = ~~~a + 1 := UInt16.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem UInt32.neg_eq_not_add (a : UInt32) : -a = ~~~a + 1 := UInt32.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem UInt64.neg_eq_not_add (a : UInt64) : -a = ~~~a + 1 := UInt64.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem USize.neg_eq_not_add (a : USize) : -a = ~~~a + 1 := USize.toBitVec_inj.1 (BitVec.neg_eq_not_add _)
theorem UInt8.not_eq_neg_sub (a : UInt8) : ~~~a = -a - 1 := UInt8.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
theorem UInt16.not_eq_neg_sub (a : UInt16) : ~~~a = -a - 1 := UInt16.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
theorem UInt32.not_eq_neg_sub (a : UInt32) : ~~~a = -a - 1 := UInt32.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
theorem UInt64.not_eq_neg_sub (a : UInt64) : ~~~a = -a - 1 := UInt64.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
theorem USize.not_eq_neg_sub (a : USize) : ~~~a = -a - 1 := USize.toBitVec_inj.1 (BitVec.not_eq_neg_add _)
-- `|||` is associative, commutative, and idempotent on every unsigned type;
-- each law is lifted from `BitVec` and registered as a `Std` typeclass instance.
protected theorem UInt8.or_assoc (a b c : UInt8) : a ||| b ||| c = a ||| (b ||| c) := UInt8.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
protected theorem UInt16.or_assoc (a b c : UInt16) : a ||| b ||| c = a ||| (b ||| c) := UInt16.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
protected theorem UInt32.or_assoc (a b c : UInt32) : a ||| b ||| c = a ||| (b ||| c) := UInt32.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
protected theorem UInt64.or_assoc (a b c : UInt64) : a ||| b ||| c = a ||| (b ||| c) := UInt64.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
protected theorem USize.or_assoc (a b c : USize) : a ||| b ||| c = a ||| (b ||| c) := USize.toBitVec_inj.1 (BitVec.or_assoc _ _ _)
instance : Std.Associative (α := UInt8) (· ||| ·) := UInt8.or_assoc
instance : Std.Associative (α := UInt16) (· ||| ·) := UInt16.or_assoc
instance : Std.Associative (α := UInt32) (· ||| ·) := UInt32.or_assoc
instance : Std.Associative (α := UInt64) (· ||| ·) := UInt64.or_assoc
instance : Std.Associative (α := USize) (· ||| ·) := USize.or_assoc
protected theorem UInt8.or_comm (a b : UInt8) : a ||| b = b ||| a := UInt8.toBitVec_inj.1 (BitVec.or_comm _ _)
protected theorem UInt16.or_comm (a b : UInt16) : a ||| b = b ||| a := UInt16.toBitVec_inj.1 (BitVec.or_comm _ _)
protected theorem UInt32.or_comm (a b : UInt32) : a ||| b = b ||| a := UInt32.toBitVec_inj.1 (BitVec.or_comm _ _)
protected theorem UInt64.or_comm (a b : UInt64) : a ||| b = b ||| a := UInt64.toBitVec_inj.1 (BitVec.or_comm _ _)
protected theorem USize.or_comm (a b : USize) : a ||| b = b ||| a := USize.toBitVec_inj.1 (BitVec.or_comm _ _)
instance : Std.Commutative (α := UInt8) (· ||| ·) := UInt8.or_comm
instance : Std.Commutative (α := UInt16) (· ||| ·) := UInt16.or_comm
instance : Std.Commutative (α := UInt32) (· ||| ·) := UInt32.or_comm
instance : Std.Commutative (α := UInt64) (· ||| ·) := UInt64.or_comm
instance : Std.Commutative (α := USize) (· ||| ·) := USize.or_comm
@[simp] protected theorem UInt8.or_self {a : UInt8} : a ||| a = a := UInt8.toBitVec_inj.1 BitVec.or_self
@[simp] protected theorem UInt16.or_self {a : UInt16} : a ||| a = a := UInt16.toBitVec_inj.1 BitVec.or_self
@[simp] protected theorem UInt32.or_self {a : UInt32} : a ||| a = a := UInt32.toBitVec_inj.1 BitVec.or_self
@[simp] protected theorem UInt64.or_self {a : UInt64} : a ||| a = a := UInt64.toBitVec_inj.1 BitVec.or_self
@[simp] protected theorem USize.or_self {a : USize} : a ||| a = a := USize.toBitVec_inj.1 BitVec.or_self
instance : Std.IdempotentOp (α := UInt8) (· ||| ·) := fun _ => UInt8.or_self
instance : Std.IdempotentOp (α := UInt16) (· ||| ·) := fun _ => UInt16.or_self
instance : Std.IdempotentOp (α := UInt32) (· ||| ·) := fun _ => UInt32.or_self
instance : Std.IdempotentOp (α := UInt64) (· ||| ·) := fun _ => UInt64.or_self
instance : Std.IdempotentOp (α := USize) (· ||| ·) := fun _ => USize.or_self
-- `0` is a two-sided identity for `|||`; the `LawfulCommIdentity` instances let
-- `Std` fold/ac machinery exploit it.
@[simp] protected theorem UInt8.or_zero {a : UInt8} : a ||| 0 = a := UInt8.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem UInt16.or_zero {a : UInt16} : a ||| 0 = a := UInt16.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem UInt32.or_zero {a : UInt32} : a ||| 0 = a := UInt32.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem UInt64.or_zero {a : UInt64} : a ||| 0 = a := UInt64.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem USize.or_zero {a : USize} : a ||| 0 = a := USize.toBitVec_inj.1 BitVec.or_zero
@[simp] protected theorem UInt8.zero_or {a : UInt8} : 0 ||| a = a := UInt8.toBitVec_inj.1 BitVec.zero_or
@[simp] protected theorem UInt16.zero_or {a : UInt16} : 0 ||| a = a := UInt16.toBitVec_inj.1 BitVec.zero_or
@[simp] protected theorem UInt32.zero_or {a : UInt32} : 0 ||| a = a := UInt32.toBitVec_inj.1 BitVec.zero_or
@[simp] protected theorem UInt64.zero_or {a : UInt64} : 0 ||| a = a := UInt64.toBitVec_inj.1 BitVec.zero_or
@[simp] protected theorem USize.zero_or {a : USize} : 0 ||| a = a := USize.toBitVec_inj.1 BitVec.zero_or
instance : Std.LawfulCommIdentity (α := UInt8) (· ||| ·) 0 where
right_id _ := UInt8.or_zero
instance : Std.LawfulCommIdentity (α := UInt16) (· ||| ·) 0 where
right_id _ := UInt16.or_zero
instance : Std.LawfulCommIdentity (α := UInt32) (· ||| ·) 0 where
right_id _ := UInt32.or_zero
instance : Std.LawfulCommIdentity (α := UInt64) (· ||| ·) 0 where
right_id _ := UInt64.or_zero
instance : Std.LawfulCommIdentity (α := USize) (· ||| ·) 0 where
right_id _ := USize.or_zero
-- `-1` is the all-ones word, so it absorbs under `|||` on either side.
-- (The `←` in `rw [← …toBitVec_inj, …]` was lost in the source rendering and is
-- restored here: it moves the goal from the `UIntN` equality to `BitVec`.)
@[simp] theorem UInt8.neg_one_or {a : UInt8} : -1 ||| a = -1 := by
  rw [← UInt8.toBitVec_inj, UInt8.toBitVec_or, UInt8.toBitVec_neg, UInt8.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
@[simp] theorem UInt16.neg_one_or {a : UInt16} : -1 ||| a = -1 := by
  rw [← UInt16.toBitVec_inj, UInt16.toBitVec_or, UInt16.toBitVec_neg, UInt16.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
@[simp] theorem UInt32.neg_one_or {a : UInt32} : -1 ||| a = -1 := by
  rw [← UInt32.toBitVec_inj, UInt32.toBitVec_or, UInt32.toBitVec_neg, UInt32.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
@[simp] theorem UInt64.neg_one_or {a : UInt64} : -1 ||| a = -1 := by
  rw [← UInt64.toBitVec_inj, UInt64.toBitVec_or, UInt64.toBitVec_neg, UInt64.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
@[simp] theorem USize.neg_one_or {a : USize} : -1 ||| a = -1 := by
  rw [← USize.toBitVec_inj, USize.toBitVec_or, USize.toBitVec_neg, USize.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_or]
-- Right-absorption follows by commutativity.
@[simp] theorem UInt8.or_neg_one {a : UInt8} : a ||| -1 = -1 := by rw [UInt8.or_comm, neg_one_or]
@[simp] theorem UInt16.or_neg_one {a : UInt16} : a ||| -1 = -1 := by rw [UInt16.or_comm, neg_one_or]
@[simp] theorem UInt32.or_neg_one {a : UInt32} : a ||| -1 = -1 := by rw [UInt32.or_comm, neg_one_or]
@[simp] theorem UInt64.or_neg_one {a : UInt64} : a ||| -1 = -1 := by rw [UInt64.or_comm, neg_one_or]
@[simp] theorem USize.or_neg_one {a : USize} : a ||| -1 = -1 := by rw [USize.or_comm, neg_one_or]
-- A disjunction of bits is zero iff both operands are zero. The `↔`/`∧`
-- connectives and the `←` in the simp set were lost in the source rendering
-- and are restored here.
@[simp] theorem UInt8.or_eq_zero_iff {a b : UInt8} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.or_eq_zero_iff {a b : UInt16} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.or_eq_zero_iff {a b : UInt32} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.or_eq_zero_iff {a b : UInt64} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.or_eq_zero_iff {a b : USize} : a ||| b = 0 ↔ a = 0 ∧ b = 0 := by
  simp [← USize.toBitVec_inj]
-- `&&&` is associative, commutative, and idempotent; lifted from `BitVec` and
-- registered as `Std` typeclass instances, in parallel with the `|||` section.
protected theorem UInt8.and_assoc (a b c : UInt8) : a &&& b &&& c = a &&& (b &&& c) := UInt8.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
protected theorem UInt16.and_assoc (a b c : UInt16) : a &&& b &&& c = a &&& (b &&& c) := UInt16.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
protected theorem UInt32.and_assoc (a b c : UInt32) : a &&& b &&& c = a &&& (b &&& c) := UInt32.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
protected theorem UInt64.and_assoc (a b c : UInt64) : a &&& b &&& c = a &&& (b &&& c) := UInt64.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
protected theorem USize.and_assoc (a b c : USize) : a &&& b &&& c = a &&& (b &&& c) := USize.toBitVec_inj.1 (BitVec.and_assoc _ _ _)
instance : Std.Associative (α := UInt8) (· &&& ·) := UInt8.and_assoc
instance : Std.Associative (α := UInt16) (· &&& ·) := UInt16.and_assoc
instance : Std.Associative (α := UInt32) (· &&& ·) := UInt32.and_assoc
instance : Std.Associative (α := UInt64) (· &&& ·) := UInt64.and_assoc
instance : Std.Associative (α := USize) (· &&& ·) := USize.and_assoc
protected theorem UInt8.and_comm (a b : UInt8) : a &&& b = b &&& a := UInt8.toBitVec_inj.1 (BitVec.and_comm _ _)
protected theorem UInt16.and_comm (a b : UInt16) : a &&& b = b &&& a := UInt16.toBitVec_inj.1 (BitVec.and_comm _ _)
protected theorem UInt32.and_comm (a b : UInt32) : a &&& b = b &&& a := UInt32.toBitVec_inj.1 (BitVec.and_comm _ _)
protected theorem UInt64.and_comm (a b : UInt64) : a &&& b = b &&& a := UInt64.toBitVec_inj.1 (BitVec.and_comm _ _)
protected theorem USize.and_comm (a b : USize) : a &&& b = b &&& a := USize.toBitVec_inj.1 (BitVec.and_comm _ _)
instance : Std.Commutative (α := UInt8) (· &&& ·) := UInt8.and_comm
instance : Std.Commutative (α := UInt16) (· &&& ·) := UInt16.and_comm
instance : Std.Commutative (α := UInt32) (· &&& ·) := UInt32.and_comm
instance : Std.Commutative (α := UInt64) (· &&& ·) := UInt64.and_comm
instance : Std.Commutative (α := USize) (· &&& ·) := USize.and_comm
@[simp] protected theorem UInt8.and_self {a : UInt8} : a &&& a = a := UInt8.toBitVec_inj.1 BitVec.and_self
@[simp] protected theorem UInt16.and_self {a : UInt16} : a &&& a = a := UInt16.toBitVec_inj.1 BitVec.and_self
@[simp] protected theorem UInt32.and_self {a : UInt32} : a &&& a = a := UInt32.toBitVec_inj.1 BitVec.and_self
@[simp] protected theorem UInt64.and_self {a : UInt64} : a &&& a = a := UInt64.toBitVec_inj.1 BitVec.and_self
@[simp] protected theorem USize.and_self {a : USize} : a &&& a = a := USize.toBitVec_inj.1 BitVec.and_self
instance : Std.IdempotentOp (α := UInt8) (· &&& ·) := fun _ => UInt8.and_self
instance : Std.IdempotentOp (α := UInt16) (· &&& ·) := fun _ => UInt16.and_self
instance : Std.IdempotentOp (α := UInt32) (· &&& ·) := fun _ => UInt32.and_self
instance : Std.IdempotentOp (α := UInt64) (· &&& ·) := fun _ => UInt64.and_self
instance : Std.IdempotentOp (α := USize) (· &&& ·) := fun _ => USize.and_self
-- `0` annihilates under `&&&` on either side.
@[simp] protected theorem UInt8.and_zero {a : UInt8} : a &&& 0 = 0 := UInt8.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem UInt16.and_zero {a : UInt16} : a &&& 0 = 0 := UInt16.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem UInt32.and_zero {a : UInt32} : a &&& 0 = 0 := UInt32.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem UInt64.and_zero {a : UInt64} : a &&& 0 = 0 := UInt64.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem USize.and_zero {a : USize} : a &&& 0 = 0 := USize.toBitVec_inj.1 BitVec.and_zero
@[simp] protected theorem UInt8.zero_and {a : UInt8} : 0 &&& a = 0 := UInt8.toBitVec_inj.1 BitVec.zero_and
@[simp] protected theorem UInt16.zero_and {a : UInt16} : 0 &&& a = 0 := UInt16.toBitVec_inj.1 BitVec.zero_and
@[simp] protected theorem UInt32.zero_and {a : UInt32} : 0 &&& a = 0 := UInt32.toBitVec_inj.1 BitVec.zero_and
@[simp] protected theorem UInt64.zero_and {a : UInt64} : 0 &&& a = 0 := UInt64.toBitVec_inj.1 BitVec.zero_and
@[simp] protected theorem USize.zero_and {a : USize} : 0 &&& a = 0 := USize.toBitVec_inj.1 BitVec.zero_and
-- `-1` is the all-ones word, so it is a two-sided identity for `&&&`.
-- (The `←` in `rw [← …toBitVec_inj, …]` was lost in the source rendering and is
-- restored here: it moves the goal from the `UIntN` equality to `BitVec`.)
@[simp] theorem UInt8.neg_one_and {a : UInt8} : -1 &&& a = a := by
  rw [← UInt8.toBitVec_inj, UInt8.toBitVec_and, UInt8.toBitVec_neg, UInt8.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
@[simp] theorem UInt16.neg_one_and {a : UInt16} : -1 &&& a = a := by
  rw [← UInt16.toBitVec_inj, UInt16.toBitVec_and, UInt16.toBitVec_neg, UInt16.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
@[simp] theorem UInt32.neg_one_and {a : UInt32} : -1 &&& a = a := by
  rw [← UInt32.toBitVec_inj, UInt32.toBitVec_and, UInt32.toBitVec_neg, UInt32.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
@[simp] theorem UInt64.neg_one_and {a : UInt64} : -1 &&& a = a := by
  rw [← UInt64.toBitVec_inj, UInt64.toBitVec_and, UInt64.toBitVec_neg, UInt64.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
@[simp] theorem USize.neg_one_and {a : USize} : -1 &&& a = a := by
  rw [← USize.toBitVec_inj, USize.toBitVec_and, USize.toBitVec_neg, USize.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_and]
-- Right identity follows by commutativity.
@[simp] theorem UInt8.and_neg_one {a : UInt8} : a &&& -1 = a := by rw [UInt8.and_comm, neg_one_and]
@[simp] theorem UInt16.and_neg_one {a : UInt16} : a &&& -1 = a := by rw [UInt16.and_comm, neg_one_and]
@[simp] theorem UInt32.and_neg_one {a : UInt32} : a &&& -1 = a := by rw [UInt32.and_comm, neg_one_and]
@[simp] theorem UInt64.and_neg_one {a : UInt64} : a &&& -1 = a := by rw [UInt64.and_comm, neg_one_and]
@[simp] theorem USize.and_neg_one {a : USize} : a &&& -1 = a := by rw [USize.and_comm, neg_one_and]
-- `-1` (all ones) is the lawful commutative identity for `&&&`.
instance : Std.LawfulCommIdentity (α := UInt8) (· &&& ·) (-1) where
right_id _ := UInt8.and_neg_one
instance : Std.LawfulCommIdentity (α := UInt16) (· &&& ·) (-1) where
right_id _ := UInt16.and_neg_one
instance : Std.LawfulCommIdentity (α := UInt32) (· &&& ·) (-1) where
right_id _ := UInt32.and_neg_one
instance : Std.LawfulCommIdentity (α := UInt64) (· &&& ·) (-1) where
right_id _ := UInt64.and_neg_one
instance : Std.LawfulCommIdentity (α := USize) (· &&& ·) (-1) where
right_id _ := USize.and_neg_one
-- A conjunction of bits is all-ones iff both operands are all-ones. The `↔`/`∧`
-- connectives and the `←` in the simp set were lost in the source rendering and
-- are restored here.
@[simp] theorem UInt8.and_eq_neg_one_iff {a b : UInt8} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← UInt8.toBitVec_inj, UInt8.toBitVec_and, UInt8.toBitVec_neg, UInt8.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
@[simp] theorem UInt16.and_eq_neg_one_iff {a b : UInt16} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← UInt16.toBitVec_inj, UInt16.toBitVec_and, UInt16.toBitVec_neg, UInt16.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
@[simp] theorem UInt32.and_eq_neg_one_iff {a b : UInt32} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← UInt32.toBitVec_inj, UInt32.toBitVec_and, UInt32.toBitVec_neg, UInt32.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
@[simp] theorem UInt64.and_eq_neg_one_iff {a b : UInt64} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← UInt64.toBitVec_inj, UInt64.toBitVec_and, UInt64.toBitVec_neg, UInt64.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
@[simp] theorem USize.and_eq_neg_one_iff {a b : USize} : a &&& b = -1 ↔ a = -1 ∧ b = -1 := by
  simp only [← USize.toBitVec_inj, USize.toBitVec_and, USize.toBitVec_neg, USize.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.and_eq_allOnes_iff]
-- `^^^` is associative and commutative (lifted from `BitVec`), and self-xor is
-- zero. Note xor is NOT idempotent, so there is no `IdempotentOp` instance here.
protected theorem UInt8.xor_assoc (a b c : UInt8) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := UInt8.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
protected theorem UInt16.xor_assoc (a b c : UInt16) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := UInt16.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
protected theorem UInt32.xor_assoc (a b c : UInt32) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := UInt32.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
protected theorem UInt64.xor_assoc (a b c : UInt64) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := UInt64.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
protected theorem USize.xor_assoc (a b c : USize) : a ^^^ b ^^^ c = a ^^^ (b ^^^ c) := USize.toBitVec_inj.1 (BitVec.xor_assoc _ _ _)
instance : Std.Associative (α := UInt8) (· ^^^ ·) := UInt8.xor_assoc
instance : Std.Associative (α := UInt16) (· ^^^ ·) := UInt16.xor_assoc
instance : Std.Associative (α := UInt32) (· ^^^ ·) := UInt32.xor_assoc
instance : Std.Associative (α := UInt64) (· ^^^ ·) := UInt64.xor_assoc
instance : Std.Associative (α := USize) (· ^^^ ·) := USize.xor_assoc
protected theorem UInt8.xor_comm (a b : UInt8) : a ^^^ b = b ^^^ a := UInt8.toBitVec_inj.1 (BitVec.xor_comm _ _)
protected theorem UInt16.xor_comm (a b : UInt16) : a ^^^ b = b ^^^ a := UInt16.toBitVec_inj.1 (BitVec.xor_comm _ _)
protected theorem UInt32.xor_comm (a b : UInt32) : a ^^^ b = b ^^^ a := UInt32.toBitVec_inj.1 (BitVec.xor_comm _ _)
protected theorem UInt64.xor_comm (a b : UInt64) : a ^^^ b = b ^^^ a := UInt64.toBitVec_inj.1 (BitVec.xor_comm _ _)
protected theorem USize.xor_comm (a b : USize) : a ^^^ b = b ^^^ a := USize.toBitVec_inj.1 (BitVec.xor_comm _ _)
instance : Std.Commutative (α := UInt8) (· ^^^ ·) := UInt8.xor_comm
instance : Std.Commutative (α := UInt16) (· ^^^ ·) := UInt16.xor_comm
instance : Std.Commutative (α := UInt32) (· ^^^ ·) := UInt32.xor_comm
instance : Std.Commutative (α := UInt64) (· ^^^ ·) := UInt64.xor_comm
instance : Std.Commutative (α := USize) (· ^^^ ·) := USize.xor_comm
@[simp] protected theorem UInt8.xor_self {a : UInt8} : a ^^^ a = 0 := UInt8.toBitVec_inj.1 BitVec.xor_self
@[simp] protected theorem UInt16.xor_self {a : UInt16} : a ^^^ a = 0 := UInt16.toBitVec_inj.1 BitVec.xor_self
@[simp] protected theorem UInt32.xor_self {a : UInt32} : a ^^^ a = 0 := UInt32.toBitVec_inj.1 BitVec.xor_self
@[simp] protected theorem UInt64.xor_self {a : UInt64} : a ^^^ a = 0 := UInt64.toBitVec_inj.1 BitVec.xor_self
@[simp] protected theorem USize.xor_self {a : USize} : a ^^^ a = 0 := USize.toBitVec_inj.1 BitVec.xor_self
-- `0` is a two-sided identity for `^^^`.
@[simp] protected theorem UInt8.xor_zero {a : UInt8} : a ^^^ 0 = a := UInt8.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem UInt16.xor_zero {a : UInt16} : a ^^^ 0 = a := UInt16.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem UInt32.xor_zero {a : UInt32} : a ^^^ 0 = a := UInt32.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem UInt64.xor_zero {a : UInt64} : a ^^^ 0 = a := UInt64.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem USize.xor_zero {a : USize} : a ^^^ 0 = a := USize.toBitVec_inj.1 BitVec.xor_zero
@[simp] protected theorem UInt8.zero_xor {a : UInt8} : 0 ^^^ a = a := UInt8.toBitVec_inj.1 BitVec.zero_xor
@[simp] protected theorem UInt16.zero_xor {a : UInt16} : 0 ^^^ a = a := UInt16.toBitVec_inj.1 BitVec.zero_xor
@[simp] protected theorem UInt32.zero_xor {a : UInt32} : 0 ^^^ a = a := UInt32.toBitVec_inj.1 BitVec.zero_xor
@[simp] protected theorem UInt64.zero_xor {a : UInt64} : 0 ^^^ a = a := UInt64.toBitVec_inj.1 BitVec.zero_xor
@[simp] protected theorem USize.zero_xor {a : USize} : 0 ^^^ a = a := USize.toBitVec_inj.1 BitVec.zero_xor
-- Xor with the all-ones word `-1` is bitwise complement, on either side.
-- (The `←` in `rw [← …toBitVec_inj, …]` was lost in the source rendering and is
-- restored here: it moves the goal from the `UIntN` equality to `BitVec`.)
@[simp] theorem UInt8.neg_one_xor {a : UInt8} : -1 ^^^ a = ~~~a := by
  rw [← UInt8.toBitVec_inj, UInt8.toBitVec_xor, UInt8.toBitVec_neg, UInt8.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, UInt8.toBitVec_not]
@[simp] theorem UInt16.neg_one_xor {a : UInt16} : -1 ^^^ a = ~~~a := by
  rw [← UInt16.toBitVec_inj, UInt16.toBitVec_xor, UInt16.toBitVec_neg, UInt16.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, UInt16.toBitVec_not]
@[simp] theorem UInt32.neg_one_xor {a : UInt32} : -1 ^^^ a = ~~~a := by
  rw [← UInt32.toBitVec_inj, UInt32.toBitVec_xor, UInt32.toBitVec_neg, UInt32.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, UInt32.toBitVec_not]
@[simp] theorem UInt64.neg_one_xor {a : UInt64} : -1 ^^^ a = ~~~a := by
  rw [← UInt64.toBitVec_inj, UInt64.toBitVec_xor, UInt64.toBitVec_neg, UInt64.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, UInt64.toBitVec_not]
@[simp] theorem USize.neg_one_xor {a : USize} : -1 ^^^ a = ~~~a := by
  rw [← USize.toBitVec_inj, USize.toBitVec_xor, USize.toBitVec_neg, USize.toBitVec_one,
    BitVec.negOne_eq_allOnes, BitVec.allOnes_xor, USize.toBitVec_not]
-- Right-hand version follows by commutativity.
@[simp] theorem UInt8.xor_neg_one {a : UInt8} : a ^^^ -1 = ~~~a := by rw [UInt8.xor_comm, neg_one_xor]
@[simp] theorem UInt16.xor_neg_one {a : UInt16} : a ^^^ -1 = ~~~a := by rw [UInt16.xor_comm, neg_one_xor]
@[simp] theorem UInt32.xor_neg_one {a : UInt32} : a ^^^ -1 = ~~~a := by rw [UInt32.xor_comm, neg_one_xor]
@[simp] theorem UInt64.xor_neg_one {a : UInt64} : a ^^^ -1 = ~~~a := by rw [UInt64.xor_comm, neg_one_xor]
@[simp] theorem USize.xor_neg_one {a : USize} : a ^^^ -1 = ~~~a := by rw [USize.xor_comm, neg_one_xor]
-- `0` is a commutative identity for `(· ^^^ ·)`. Only the right-identity law is
-- supplied; `Std.LawfulCommIdentity` derives the left law from commutativity.
instance : Std.LawfulCommIdentity (α := UInt8) (· ^^^ ·) 0 where
  right_id _ := UInt8.xor_zero
instance : Std.LawfulCommIdentity (α := UInt16) (· ^^^ ·) 0 where
  right_id _ := UInt16.xor_zero
instance : Std.LawfulCommIdentity (α := UInt32) (· ^^^ ·) 0 where
  right_id _ := UInt32.xor_zero
instance : Std.LawfulCommIdentity (α := UInt64) (· ^^^ ·) 0 where
  right_id _ := UInt64.xor_zero
instance : Std.LawfulCommIdentity (α := USize) (· ^^^ ·) 0 where
  right_id _ := USize.xor_zero
/-! XOR as an equality test: `a ^^^ b = 0` iff `a = b`, and XOR by a fixed word is
injective on either side. The stripped `↔` and `←` tokens are restored. -/

/-- Two words XOR to zero exactly when they are equal. -/
@[simp] theorem UInt8.xor_eq_zero_iff {a b : UInt8} : a ^^^ b = 0 ↔ a = b := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.xor_eq_zero_iff {a b : UInt16} : a ^^^ b = 0 ↔ a = b := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.xor_eq_zero_iff {a b : UInt32} : a ^^^ b = 0 ↔ a = b := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.xor_eq_zero_iff {a b : UInt64} : a ^^^ b = 0 ↔ a = b := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.xor_eq_zero_iff {a b : USize} : a ^^^ b = 0 ↔ a = b := by simp [← USize.toBitVec_inj]
/-- XOR by `c` on the right is injective. -/
@[simp] theorem UInt8.xor_left_inj {a b : UInt8} (c : UInt8) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.xor_left_inj {a b : UInt16} (c : UInt16) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.xor_left_inj {a b : UInt32} (c : UInt32) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.xor_left_inj {a b : UInt64} (c : UInt64) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.xor_left_inj {a b : USize} (c : USize) : (a ^^^ c = b ^^^ c) ↔ a = b := by
  simp [← USize.toBitVec_inj]
/-- XOR by `c` on the left is injective. -/
@[simp] theorem UInt8.xor_right_inj {a b : UInt8} (c : UInt8) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.xor_right_inj {a b : UInt16} (c : UInt16) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.xor_right_inj {a b : UInt32} (c : UInt32) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.xor_right_inj {a b : UInt64} (c : UInt64) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.xor_right_inj {a b : USize} (c : USize) : (c ^^^ a = c ^^^ b) ↔ a = b := by
  simp [← USize.toBitVec_inj]
/-- The complement of `0` is `-1` (all bits set); definitional for fixed widths. -/
@[simp] theorem UInt8.not_zero : ~~~(0 : UInt8) = -1 := rfl
@[simp] theorem UInt16.not_zero : ~~~(0 : UInt16) = -1 := rfl
@[simp] theorem UInt32.not_zero : ~~~(0 : UInt32) = -1 := rfl
@[simp] theorem UInt64.not_zero : ~~~(0 : UInt64) = -1 := rfl
-- `USize` has a platform-dependent width, so this is not proved by `rfl`.
@[simp] theorem USize.not_zero : ~~~(0 : USize) = -1 := by simp [USize.not_eq_neg_sub]
/-- The complement of `-1` (all bits set) is `0`. -/
@[simp] theorem UInt8.not_neg_one : ~~~(-1 : UInt8) = 0 := rfl
@[simp] theorem UInt16.not_neg_one : ~~~(-1 : UInt16) = 0 := rfl
@[simp] theorem UInt32.not_neg_one : ~~~(-1 : UInt32) = 0 := rfl
@[simp] theorem UInt64.not_neg_one : ~~~(-1 : UInt64) = 0 := rfl
-- `USize` has a platform-dependent width, so this is not proved by `rfl`.
@[simp] theorem USize.not_neg_one : ~~~(-1 : USize) = 0 := by simp [USize.not_eq_neg_sub]
/-! Bitwise complement is an involution and injective; a word ANDed with its
complement is zero. Stripped `←` and `↔` tokens are restored. -/

/-- Complement is an involution. -/
@[simp] theorem UInt8.not_not {a : UInt8} : ~~~(~~~a) = a := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.not_not {a : UInt16} : ~~~(~~~a) = a := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.not_not {a : UInt32} : ~~~(~~~a) = a := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.not_not {a : UInt64} : ~~~(~~~a) = a := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.not_not {a : USize} : ~~~(~~~a) = a := by simp [← USize.toBitVec_inj]
/-- Complement is injective. -/
@[simp] theorem UInt8.not_inj {a b : UInt8} : ~~~a = ~~~b ↔ a = b := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.not_inj {a b : UInt16} : ~~~a = ~~~b ↔ a = b := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.not_inj {a b : UInt32} : ~~~a = ~~~b ↔ a = b := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.not_inj {a b : UInt64} : ~~~a = ~~~b ↔ a = b := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.not_inj {a b : USize} : ~~~a = ~~~b ↔ a = b := by simp [← USize.toBitVec_inj]
/-- A word ANDed with its own complement is zero. -/
@[simp] theorem UInt8.and_not_self {a : UInt8} : a &&& ~~~a = 0 := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.and_not_self {a : UInt16} : a &&& ~~~a = 0 := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.and_not_self {a : UInt32} : a &&& ~~~a = 0 := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.and_not_self {a : UInt64} : a &&& ~~~a = 0 := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.and_not_self {a : USize} : a &&& ~~~a = 0 := by simp [← USize.toBitVec_inj]
/-- Symmetric version of `and_not_self`, by commutativity of `&&&`. -/
@[simp] theorem UInt8.not_and_self {a : UInt8} : ~~~a &&& a = 0 := by simp [UInt8.and_comm]
@[simp] theorem UInt16.not_and_self {a : UInt16} : ~~~a &&& a = 0 := by simp [UInt16.and_comm]
@[simp] theorem UInt32.not_and_self {a : UInt32} : ~~~a &&& a = 0 := by simp [UInt32.and_comm]
@[simp] theorem UInt64.not_and_self {a : UInt64} : ~~~a &&& a = 0 := by simp [UInt64.and_comm]
@[simp] theorem USize.not_and_self {a : USize} : ~~~a &&& a = 0 := by simp [USize.and_comm]
/-! A word ORed with its own complement is `-1` (all bits set).
Stripped `←` tokens in the `rw` chains are restored. -/

/-- `a ||| ~~~a = -1`: every bit position is set on one side or the other. -/
@[simp] theorem UInt8.or_not_self {a : UInt8} : a ||| ~~~a = -1 := by
  rw [← UInt8.toBitVec_inj, UInt8.toBitVec_or, UInt8.toBitVec_not, BitVec.or_not_self,
    UInt8.toBitVec_neg, UInt8.toBitVec_one, BitVec.negOne_eq_allOnes]
@[simp] theorem UInt16.or_not_self {a : UInt16} : a ||| ~~~a = -1 := by
  rw [← UInt16.toBitVec_inj, UInt16.toBitVec_or, UInt16.toBitVec_not, BitVec.or_not_self,
    UInt16.toBitVec_neg, UInt16.toBitVec_one, BitVec.negOne_eq_allOnes]
@[simp] theorem UInt32.or_not_self {a : UInt32} : a ||| ~~~a = -1 := by
  rw [← UInt32.toBitVec_inj, UInt32.toBitVec_or, UInt32.toBitVec_not, BitVec.or_not_self,
    UInt32.toBitVec_neg, UInt32.toBitVec_one, BitVec.negOne_eq_allOnes]
@[simp] theorem UInt64.or_not_self {a : UInt64} : a ||| ~~~a = -1 := by
  rw [← UInt64.toBitVec_inj, UInt64.toBitVec_or, UInt64.toBitVec_not, BitVec.or_not_self,
    UInt64.toBitVec_neg, UInt64.toBitVec_one, BitVec.negOne_eq_allOnes]
@[simp] theorem USize.or_not_self {a : USize} : a ||| ~~~a = -1 := by
  rw [← USize.toBitVec_inj, USize.toBitVec_or, USize.toBitVec_not, BitVec.or_not_self,
    USize.toBitVec_neg, USize.toBitVec_one, BitVec.negOne_eq_allOnes]
/-- Symmetric version of `or_not_self`, by commutativity of `|||`. -/
@[simp] theorem UInt8.not_or_self {a : UInt8} : ~~~a ||| a = -1 := by simp [UInt8.or_comm]
@[simp] theorem UInt16.not_or_self {a : UInt16} : ~~~a ||| a = -1 := by simp [UInt16.or_comm]
@[simp] theorem UInt32.not_or_self {a : UInt32} : ~~~a ||| a = -1 := by simp [UInt32.or_comm]
@[simp] theorem UInt64.not_or_self {a : UInt64} : ~~~a ||| a = -1 := by simp [UInt64.or_comm]
@[simp] theorem USize.not_or_self {a : USize} : ~~~a ||| a = -1 := by simp [USize.or_comm]
/-! Moving complement across an equality, and the fact that no word equals its
own complement. Stripped `↔`, `≠` and `←` tokens are restored. -/

/-- Complement can be moved to the other side of an equality. -/
theorem UInt8.not_eq_comm {a b : UInt8} : ~~~a = b ↔ a = ~~~b := by
  simp [← UInt8.toBitVec_inj, BitVec.not_eq_comm]
theorem UInt16.not_eq_comm {a b : UInt16} : ~~~a = b ↔ a = ~~~b := by
  simp [← UInt16.toBitVec_inj, BitVec.not_eq_comm]
theorem UInt32.not_eq_comm {a b : UInt32} : ~~~a = b ↔ a = ~~~b := by
  simp [← UInt32.toBitVec_inj, BitVec.not_eq_comm]
theorem UInt64.not_eq_comm {a b : UInt64} : ~~~a = b ↔ a = ~~~b := by
  simp [← UInt64.toBitVec_inj, BitVec.not_eq_comm]
theorem USize.not_eq_comm {a b : USize} : ~~~a = b ↔ a = ~~~b := by
  simp [← USize.toBitVec_inj, BitVec.not_eq_comm]
/-- No word equals its own complement (they differ in every bit). -/
@[simp] theorem UInt8.ne_not_self {a : UInt8} : a ≠ ~~~a := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.ne_not_self {a : UInt16} : a ≠ ~~~a := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.ne_not_self {a : UInt32} : a ≠ ~~~a := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.ne_not_self {a : UInt64} : a ≠ ~~~a := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.ne_not_self {a : USize} : a ≠ ~~~a := by simp [← USize.toBitVec_inj]
/-- Symmetric version of `ne_not_self`. -/
@[simp] theorem UInt8.not_ne_self {a : UInt8} : ~~~a ≠ a := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.not_ne_self {a : UInt16} : ~~~a ≠ a := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.not_ne_self {a : UInt32} : ~~~a ≠ a := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.not_ne_self {a : UInt64} : ~~~a ≠ a := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.not_ne_self {a : USize} : ~~~a ≠ a := by simp [← USize.toBitVec_inj]
/-! Complement distributes out of either argument of XOR.
Stripped `←` tokens are restored. -/

/-- `~~~a ^^^ b = ~~~(a ^^^ b)`: complement on the left argument factors out. -/
theorem UInt8.not_xor {a b : UInt8} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← UInt8.toBitVec_inj, BitVec.not_xor_left]
theorem UInt16.not_xor {a b : UInt16} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← UInt16.toBitVec_inj, BitVec.not_xor_left]
theorem UInt32.not_xor {a b : UInt32} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← UInt32.toBitVec_inj, BitVec.not_xor_left]
theorem UInt64.not_xor {a b : UInt64} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← UInt64.toBitVec_inj, BitVec.not_xor_left]
theorem USize.not_xor {a b : USize} : ~~~a ^^^ b = ~~~(a ^^^ b) := by
  simp [← USize.toBitVec_inj, BitVec.not_xor_left]
/-- `a ^^^ ~~~b = ~~~(a ^^^ b)`: complement on the right argument factors out. -/
theorem UInt8.xor_not {a b : UInt8} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← UInt8.toBitVec_inj, BitVec.not_xor_right]
theorem UInt16.xor_not {a b : UInt16} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← UInt16.toBitVec_inj, BitVec.not_xor_right]
theorem UInt32.xor_not {a b : UInt32} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← UInt32.toBitVec_inj, BitVec.not_xor_right]
theorem UInt64.xor_not {a b : UInt64} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← UInt64.toBitVec_inj, BitVec.not_xor_right]
theorem USize.xor_not {a b : USize} : a ^^^ ~~~b = ~~~(a ^^^ b) := by
  simp [← USize.toBitVec_inj, BitVec.not_xor_right]
/-! Left-shift by zero is the identity, and shifting zero gives zero.
Stripped `←` tokens are restored. -/

/-- Shifting left by `0` is the identity. -/
@[simp] theorem UInt8.shiftLeft_zero {a : UInt8} : a <<< 0 = a := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.shiftLeft_zero {a : UInt16} : a <<< 0 = a := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.shiftLeft_zero {a : UInt32} : a <<< 0 = a := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.shiftLeft_zero {a : UInt64} : a <<< 0 = a := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.shiftLeft_zero {a : USize} : a <<< 0 = a := by simp [← USize.toBitVec_inj]
/-- Shifting `0` left by any amount gives `0`. -/
@[simp] theorem UInt8.zero_shiftLeft {a : UInt8} : 0 <<< a = 0 := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.zero_shiftLeft {a : UInt16} : 0 <<< a = 0 := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.zero_shiftLeft {a : UInt32} : 0 <<< a = 0 := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.zero_shiftLeft {a : UInt64} : 0 <<< a = 0 := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.zero_shiftLeft {a : USize} : 0 <<< a = 0 := by simp [← USize.toBitVec_inj]
/-! Left-shift distributes over the pointwise bitwise operations `^^^`, `&&&`, `|||`.
Stripped `←` tokens are restored. -/

/-- Left-shift distributes over XOR. -/
theorem UInt8.shiftLeft_xor {a b c : UInt8} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← UInt8.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
theorem UInt16.shiftLeft_xor {a b c : UInt16} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← UInt16.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
theorem UInt32.shiftLeft_xor {a b c : UInt32} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← UInt32.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
theorem UInt64.shiftLeft_xor {a b c : UInt64} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← UInt64.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
theorem USize.shiftLeft_xor {a b c : USize} : (a ^^^ b) <<< c = (a <<< c) ^^^ (b <<< c) := by
  simp [← USize.toBitVec_inj, BitVec.shiftLeft_xor_distrib]
/-- Left-shift distributes over AND. -/
theorem UInt8.shiftLeft_and {a b c : UInt8} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← UInt8.toBitVec_inj, BitVec.shiftLeft_and_distrib]
theorem UInt16.shiftLeft_and {a b c : UInt16} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← UInt16.toBitVec_inj, BitVec.shiftLeft_and_distrib]
theorem UInt32.shiftLeft_and {a b c : UInt32} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← UInt32.toBitVec_inj, BitVec.shiftLeft_and_distrib]
theorem UInt64.shiftLeft_and {a b c : UInt64} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← UInt64.toBitVec_inj, BitVec.shiftLeft_and_distrib]
theorem USize.shiftLeft_and {a b c : USize} : (a &&& b) <<< c = (a <<< c) &&& (b <<< c) := by
  simp [← USize.toBitVec_inj, BitVec.shiftLeft_and_distrib]
/-- Left-shift distributes over OR. -/
theorem UInt8.shiftLeft_or {a b c : UInt8} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← UInt8.toBitVec_inj, BitVec.shiftLeft_or_distrib]
theorem UInt16.shiftLeft_or {a b c : UInt16} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← UInt16.toBitVec_inj, BitVec.shiftLeft_or_distrib]
theorem UInt32.shiftLeft_or {a b c : UInt32} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← UInt32.toBitVec_inj, BitVec.shiftLeft_or_distrib]
theorem UInt64.shiftLeft_or {a b c : UInt64} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← UInt64.toBitVec_inj, BitVec.shiftLeft_or_distrib]
theorem USize.shiftLeft_or {a b c : USize} : (a ||| b) <<< c = (a <<< c) ||| (b <<< c) := by
  simp [← USize.toBitVec_inj, BitVec.shiftLeft_or_distrib]
/-! Splitting a left-shift by a sum into two shifts, under the hypothesis that the
total shift amount stays below the bit width. Stripped `←` tokens are restored. -/

/-- `a <<< (b + c) = (a <<< b) <<< c` when `b.toNat + c.toNat < 8`. -/
theorem UInt8.shiftLeft_add_of_toNat_lt {a b c : UInt8} (h : b.toNat + c.toNat < 8) :
    a <<< (b + c) = (a <<< b) <<< c := by
  simp [← UInt8.toBitVec_inj, Nat.mod_eq_of_lt h, Nat.mod_eq_of_lt (show b.toNat < 8 by omega),
    Nat.mod_eq_of_lt (show c.toNat < 8 by omega), BitVec.shiftLeft_add]
theorem UInt16.shiftLeft_add_of_toNat_lt {a b c : UInt16} (h : b.toNat + c.toNat < 16) :
    a <<< (b + c) = (a <<< b) <<< c := by
  simp [← UInt16.toBitVec_inj, Nat.mod_eq_of_lt h, Nat.mod_eq_of_lt (show b.toNat < 16 by omega),
    Nat.mod_eq_of_lt (show c.toNat < 16 by omega), BitVec.shiftLeft_add]
theorem UInt32.shiftLeft_add_of_toNat_lt {a b c : UInt32} (h : b.toNat + c.toNat < 32) :
    a <<< (b + c) = (a <<< b) <<< c := by
  simp [← UInt32.toBitVec_inj, Nat.mod_eq_of_lt h, Nat.mod_eq_of_lt (show b.toNat < 32 by omega),
    Nat.mod_eq_of_lt (show c.toNat < 32 by omega), BitVec.shiftLeft_add]
theorem UInt64.shiftLeft_add_of_toNat_lt {a b c : UInt64} (h : b.toNat + c.toNat < 64) :
    a <<< (b + c) = (a <<< b) <<< c := by
  simp [← UInt64.toBitVec_inj, Nat.mod_eq_of_lt h, Nat.mod_eq_of_lt (show b.toNat < 64 by omega),
    Nat.mod_eq_of_lt (show c.toNat < 64 by omega), BitVec.shiftLeft_add]
/-- Platform-width version: the bound is `System.Platform.numBits`. -/
theorem USize.shiftLeft_add_of_toNat_lt {a b c : USize}
    (h : b.toNat + c.toNat < System.Platform.numBits) :
    a <<< (b + c) = (a <<< b) <<< c := by
  simp only [← USize.toBitVec_inj, USize.toBitVec_shiftLeft, USize.toBitVec_add,
    BitVec.natCast_eq_ofNat, BitVec.shiftLeft_eq', BitVec.toNat_umod, BitVec.toNat_add,
    toNat_toBitVec, BitVec.toNat_ofNat, Nat.mod_two_pow_self]
  -- Discharge the four `mod` reductions, then split the shift.
  rw [Nat.mod_eq_of_lt, Nat.mod_eq_of_lt, Nat.mod_eq_of_lt, Nat.mod_eq_of_lt, BitVec.shiftLeft_add]
  · omega
  · omega
  · exact Nat.lt_trans h Nat.lt_two_pow_self
  · exact Nat.lt_of_le_of_lt (Nat.mod_le _ _) h
/-- `a <<< (b + c) = (a <<< b) <<< c` when `b`, `c` and `b + c` are all below the
bit width (as words). Reduces to `shiftLeft_add_of_toNat_lt` by converting the
word-level bounds to `Nat` bounds. -/
theorem UInt8.shiftLeft_add {a b c : UInt8} (hb : b < 8) (hc : c < 8) (hbc : b + c < 8) :
    a <<< (b + c) = (a <<< b) <<< c := by
  apply UInt8.shiftLeft_add_of_toNat_lt
  have hb : b.toNat < 8 := by simpa [lt_iff_toNat_lt] using hb
  have hc : c.toNat < 8 := by simpa [lt_iff_toNat_lt] using hc
  -- `hbc` speaks about the wrapped sum; the `mod` is removable since `b + c` fits.
  simp only [lt_iff_toNat_lt, UInt8.toNat_add, Nat.reducePow, UInt8.reduceToNat] at hbc
  rwa [Nat.mod_eq_of_lt (by omega)] at hbc
/-- 16-bit analogue of `UInt8.shiftLeft_add`. -/
theorem UInt16.shiftLeft_add {a b c : UInt16} (hb : b < 16) (hc : c < 16) (hbc : b + c < 16) :
    a <<< (b + c) = (a <<< b) <<< c := by
  apply UInt16.shiftLeft_add_of_toNat_lt
  have hb : b.toNat < 16 := by simpa [lt_iff_toNat_lt] using hb
  have hc : c.toNat < 16 := by simpa [lt_iff_toNat_lt] using hc
  simp only [lt_iff_toNat_lt, UInt16.toNat_add, Nat.reducePow, UInt16.reduceToNat] at hbc
  rwa [Nat.mod_eq_of_lt (by omega)] at hbc
/-- 32-bit analogue of `UInt8.shiftLeft_add`. -/
theorem UInt32.shiftLeft_add {a b c : UInt32} (hb : b < 32) (hc : c < 32) (hbc : b + c < 32) :
    a <<< (b + c) = (a <<< b) <<< c := by
  apply UInt32.shiftLeft_add_of_toNat_lt
  have hb : b.toNat < 32 := by simpa [lt_iff_toNat_lt] using hb
  have hc : c.toNat < 32 := by simpa [lt_iff_toNat_lt] using hc
  simp only [lt_iff_toNat_lt, UInt32.toNat_add, Nat.reducePow, UInt32.reduceToNat] at hbc
  rwa [Nat.mod_eq_of_lt (by omega)] at hbc
/-- 64-bit analogue of `UInt8.shiftLeft_add`. -/
theorem UInt64.shiftLeft_add {a b c : UInt64} (hb : b < 64) (hc : c < 64) (hbc : b + c < 64) :
    a <<< (b + c) = (a <<< b) <<< c := by
  apply UInt64.shiftLeft_add_of_toNat_lt
  have hb : b.toNat < 64 := by simpa [lt_iff_toNat_lt] using hb
  have hc : c.toNat < 64 := by simpa [lt_iff_toNat_lt] using hc
  simp only [lt_iff_toNat_lt, UInt64.toNat_add, Nat.reducePow, UInt64.reduceToNat] at hbc
  rwa [Nat.mod_eq_of_lt (by omega)] at hbc
/-- Platform-width analogue; the bound is `USize.ofNat System.Platform.numBits`. -/
theorem USize.shiftLeft_add {a b c : USize} (hb : b < USize.ofNat System.Platform.numBits)
    (hc : c < USize.ofNat System.Platform.numBits) (hbc : b + c < USize.ofNat System.Platform.numBits) :
    a <<< (b + c) = (a <<< b) <<< c := by
  apply USize.shiftLeft_add_of_toNat_lt
  have hb : b.toNat < System.Platform.numBits := by simpa [lt_iff_toNat_lt] using hb
  have hc : c.toNat < System.Platform.numBits := by simpa [lt_iff_toNat_lt] using hc
  simp only [lt_iff_toNat_lt, USize.toNat_add, toNat_ofNat', Nat.mod_two_pow_self] at hbc
  rwa [Nat.mod_eq_of_lt] at hbc
  -- Side goal: `numBits < 2 ^ numBits`; case on the platform (32 or 64 bits).
  cases System.Platform.numBits_eq <;> simp_all <;> omega
/-! A shifted all-ones mask absorbs under `&&&`/`|||` against any equally-shifted word.
Stripped `←` tokens are restored, and `UInt64.neg_one_shiftLeft_or_shiftLeft` had its
binders mistyped as `UInt8` in the original — fixed to `UInt64`. -/

/-- `(-1) <<< b` is a superset mask of `a <<< b`, so AND keeps `a <<< b`. -/
@[simp] theorem UInt8.neg_one_shiftLeft_and_shiftLeft {a b : UInt8} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← UInt8.shiftLeft_and]
@[simp] theorem UInt16.neg_one_shiftLeft_and_shiftLeft {a b : UInt16} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← UInt16.shiftLeft_and]
@[simp] theorem UInt32.neg_one_shiftLeft_and_shiftLeft {a b : UInt32} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← UInt32.shiftLeft_and]
@[simp] theorem UInt64.neg_one_shiftLeft_and_shiftLeft {a b : UInt64} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← UInt64.shiftLeft_and]
@[simp] theorem USize.neg_one_shiftLeft_and_shiftLeft {a b : USize} :
    (-1) <<< b &&& a <<< b = a <<< b := by simp [← USize.shiftLeft_and]
/-- `(-1) <<< b` is a superset mask of `a <<< b`, so OR keeps `(-1) <<< b`. -/
@[simp] theorem UInt8.neg_one_shiftLeft_or_shiftLeft {a b : UInt8} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← UInt8.shiftLeft_or]
@[simp] theorem UInt16.neg_one_shiftLeft_or_shiftLeft {a b : UInt16} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← UInt16.shiftLeft_or]
@[simp] theorem UInt32.neg_one_shiftLeft_or_shiftLeft {a b : UInt32} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← UInt32.shiftLeft_or]
-- Bug fix: the binders were `{a b : UInt8}` while the statement and proof are about `UInt64`.
@[simp] theorem UInt64.neg_one_shiftLeft_or_shiftLeft {a b : UInt64} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← UInt64.shiftLeft_or]
@[simp] theorem USize.neg_one_shiftLeft_or_shiftLeft {a b : USize} :
    (-1) <<< b ||| a <<< b = (-1) <<< b := by simp [← USize.shiftLeft_or]
/-! Right-shift by zero is the identity, and shifting zero gives zero.
Stripped `←` tokens are restored. -/

/-- Shifting right by `0` is the identity. -/
@[simp] theorem UInt8.shiftRight_zero {a : UInt8} : a >>> 0 = a := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.shiftRight_zero {a : UInt16} : a >>> 0 = a := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.shiftRight_zero {a : UInt32} : a >>> 0 = a := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.shiftRight_zero {a : UInt64} : a >>> 0 = a := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.shiftRight_zero {a : USize} : a >>> 0 = a := by simp [← USize.toBitVec_inj]
/-- Shifting `0` right by any amount gives `0`. -/
@[simp] theorem UInt8.zero_shiftRight {a : UInt8} : 0 >>> a = 0 := by simp [← UInt8.toBitVec_inj]
@[simp] theorem UInt16.zero_shiftRight {a : UInt16} : 0 >>> a = 0 := by simp [← UInt16.toBitVec_inj]
@[simp] theorem UInt32.zero_shiftRight {a : UInt32} : 0 >>> a = 0 := by simp [← UInt32.toBitVec_inj]
@[simp] theorem UInt64.zero_shiftRight {a : UInt64} : 0 >>> a = 0 := by simp [← UInt64.toBitVec_inj]
@[simp] theorem USize.zero_shiftRight {a : USize} : 0 >>> a = 0 := by simp [← USize.toBitVec_inj]
/-! (Logical) right-shift distributes over the pointwise bitwise operations.
Stripped `←` tokens are restored. -/

/-- Right-shift distributes over XOR. -/
theorem UInt8.shiftRight_xor {a b c : UInt8} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← UInt8.toBitVec_inj, BitVec.ushiftRight_xor_distrib]
theorem UInt16.shiftRight_xor {a b c : UInt16} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← UInt16.toBitVec_inj, BitVec.ushiftRight_xor_distrib]
theorem UInt32.shiftRight_xor {a b c : UInt32} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← UInt32.toBitVec_inj, BitVec.ushiftRight_xor_distrib]
theorem UInt64.shiftRight_xor {a b c : UInt64} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← UInt64.toBitVec_inj, BitVec.ushiftRight_xor_distrib]
theorem USize.shiftRight_xor {a b c : USize} : (a ^^^ b) >>> c = (a >>> c) ^^^ (b >>> c) := by
  simp [← USize.toBitVec_inj, BitVec.ushiftRight_xor_distrib]
/-- Right-shift distributes over AND. -/
theorem UInt8.shiftRight_and {a b c : UInt8} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← UInt8.toBitVec_inj, BitVec.ushiftRight_and_distrib]
theorem UInt16.shiftRight_and {a b c : UInt16} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← UInt16.toBitVec_inj, BitVec.ushiftRight_and_distrib]
theorem UInt32.shiftRight_and {a b c : UInt32} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← UInt32.toBitVec_inj, BitVec.ushiftRight_and_distrib]
theorem UInt64.shiftRight_and {a b c : UInt64} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← UInt64.toBitVec_inj, BitVec.ushiftRight_and_distrib]
theorem USize.shiftRight_and {a b c : USize} : (a &&& b) >>> c = (a >>> c) &&& (b >>> c) := by
  simp [← USize.toBitVec_inj, BitVec.ushiftRight_and_distrib]
/-- Right-shift distributes over OR. -/
theorem UInt8.shiftRight_or {a b c : UInt8} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← UInt8.toBitVec_inj, BitVec.ushiftRight_or_distrib]
theorem UInt16.shiftRight_or {a b c : UInt16} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← UInt16.toBitVec_inj, BitVec.ushiftRight_or_distrib]
theorem UInt32.shiftRight_or {a b c : UInt32} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← UInt32.toBitVec_inj, BitVec.ushiftRight_or_distrib]
theorem UInt64.shiftRight_or {a b c : UInt64} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← UInt64.toBitVec_inj, BitVec.ushiftRight_or_distrib]
theorem USize.shiftRight_or {a b c : USize} : (a ||| b) >>> c = (a >>> c) ||| (b >>> c) := by
  simp [← USize.toBitVec_inj, BitVec.ushiftRight_or_distrib]
/-! Order facts: AND can only clear bits (so it is `≤` either operand) and OR can
only set bits (so either operand is `≤` the result). The stripped `≤` tokens are
restored; each proof transports the corresponding `Nat` lemma. -/

/-- `a &&& b ≤ b`: AND never exceeds its right operand. -/
theorem UInt8.and_le_right {a b : UInt8} : a &&& b ≤ b := by
  simpa [UInt8.le_iff_toNat_le] using Nat.and_le_right
theorem UInt16.and_le_right {a b : UInt16} : a &&& b ≤ b := by
  simpa [UInt16.le_iff_toNat_le] using Nat.and_le_right
theorem UInt32.and_le_right {a b : UInt32} : a &&& b ≤ b := by
  simpa [UInt32.le_iff_toNat_le] using Nat.and_le_right
theorem UInt64.and_le_right {a b : UInt64} : a &&& b ≤ b := by
  simpa [UInt64.le_iff_toNat_le] using Nat.and_le_right
theorem USize.and_le_right {a b : USize} : a &&& b ≤ b := by
  simpa [USize.le_iff_toNat_le] using Nat.and_le_right
/-- `a &&& b ≤ a`: AND never exceeds its left operand. -/
theorem UInt8.and_le_left {a b : UInt8} : a &&& b ≤ a := by
  simpa [UInt8.le_iff_toNat_le] using Nat.and_le_left
theorem UInt16.and_le_left {a b : UInt16} : a &&& b ≤ a := by
  simpa [UInt16.le_iff_toNat_le] using Nat.and_le_left
theorem UInt32.and_le_left {a b : UInt32} : a &&& b ≤ a := by
  simpa [UInt32.le_iff_toNat_le] using Nat.and_le_left
theorem UInt64.and_le_left {a b : UInt64} : a &&& b ≤ a := by
  simpa [UInt64.le_iff_toNat_le] using Nat.and_le_left
theorem USize.and_le_left {a b : USize} : a &&& b ≤ a := by
  simpa [USize.le_iff_toNat_le] using Nat.and_le_left
/-- `a ≤ a ||| b`: OR never decreases its left operand. -/
theorem UInt8.left_le_or {a b : UInt8} : a ≤ a ||| b := by
  simpa [UInt8.le_iff_toNat_le] using Nat.left_le_or
theorem UInt16.left_le_or {a b : UInt16} : a ≤ a ||| b := by
  simpa [UInt16.le_iff_toNat_le] using Nat.left_le_or
theorem UInt32.left_le_or {a b : UInt32} : a ≤ a ||| b := by
  simpa [UInt32.le_iff_toNat_le] using Nat.left_le_or
theorem UInt64.left_le_or {a b : UInt64} : a ≤ a ||| b := by
  simpa [UInt64.le_iff_toNat_le] using Nat.left_le_or
theorem USize.left_le_or {a b : USize} : a ≤ a ||| b := by
  simpa [USize.le_iff_toNat_le] using Nat.left_le_or
/-- `b ≤ a ||| b`: OR never decreases its right operand. -/
theorem UInt8.right_le_or {a b : UInt8} : b ≤ a ||| b := by
  simpa [UInt8.le_iff_toNat_le] using Nat.right_le_or
theorem UInt16.right_le_or {a b : UInt16} : b ≤ a ||| b := by
  simpa [UInt16.le_iff_toNat_le] using Nat.right_le_or
theorem UInt32.right_le_or {a b : UInt32} : b ≤ a ||| b := by
  simpa [UInt32.le_iff_toNat_le] using Nat.right_le_or
theorem UInt64.right_le_or {a b : UInt64} : b ≤ a ||| b := by
  simpa [UInt64.le_iff_toNat_le] using Nat.right_le_or
theorem USize.right_le_or {a b : USize} : b ≤ a ||| b := by
  simpa [USize.le_iff_toNat_le] using Nat.right_le_or

File diff suppressed because it is too large Load Diff

View File

@@ -2783,7 +2783,7 @@ theorem any_eq_not_all_not {xs : Vector α n} {p : α → Bool} : xs.any p = !xs
simp
@[simp] theorem all_filter {xs : Vector α n} {p q : α Bool} :
(xs.filter p).all q = xs.all fun a => !(p a) || q a := by
(xs.filter p).all q = xs.all fun a => p a → q a := by
rcases xs with xs, rfl
simp

View File

@@ -15,13 +15,3 @@ instance (priority := 300) Zero.toOfNat0 {α} [Zero α] : OfNat α (nat_lit 0) w
instance (priority := 200) Zero.ofOfNat0 {α} [OfNat α (nat_lit 0)] : Zero α where
zero := 0
/-!
Instances converting between `One α` and `OfNat α (nat_lit 1)`.
-/
instance (priority := 300) One.toOfNat1 {α} [One α] : OfNat α (nat_lit 1) where
ofNat := ‹One α›.1
instance (priority := 200) One.ofOfNat1 {α} [OfNat α (nat_lit 1)] : One α where
one := 1

View File

@@ -57,11 +57,10 @@ private opaque DynamicPointed : NonemptyType.{0} :=
⟨Name × NonScalar, inferInstance⟩
/--
A type-tagged union that can store any type with a `TypeName` instance.
Type-tagged union that can store any type with a `TypeName` instance.
This is roughly equivalent to `(α : Type) × TypeName α × α`, but without the universe bump. Use
`Dynamic.mk` to inject a value into `Dynamic` from another type, and `Dynamic.get?` to extract a
value from `Dynamic` if it has some expected type.
This is roughly equivalent to `(α : Type) × TypeName α × α` but without the
universe bump.
-/
def Dynamic : Type := DynamicPointed.type
@@ -93,10 +92,5 @@ opaque Dynamic.get? (α) (any : Dynamic) [TypeName α] : Option α
private unsafe def Dynamic.mkImpl [TypeName α] (obj : α) : Dynamic :=
unsafeCast (TypeName.typeName α, (unsafeCast obj : NonScalar))
/--
Stores the provided value in a `Dynamic`.
Use `Dynamic.get? α` to retrieve it.
-/
@[implemented_by Dynamic.mkImpl]
opaque Dynamic.mk [TypeName α] (obj : α) : Dynamic

View File

@@ -12,4 +12,3 @@ import Init.Grind.Propagator
import Init.Grind.Util
import Init.Grind.Offset
import Init.Grind.PP
import Init.Grind.CommRing

View File

@@ -1,11 +0,0 @@
/-
Copyright (c) 2025 Lean FRO, LLC. or its affiliates. All Rights Reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kim Morrison
-/
prelude
import Init.Grind.CommRing.Basic
import Init.Grind.CommRing.Int
import Init.Grind.CommRing.UInt
import Init.Grind.CommRing.SInt
import Init.Grind.CommRing.BitVec

View File

@@ -1,47 +0,0 @@
/-
Copyright (c) 2025 Lean FRO, LLC. or its affiliates. All Rights Reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kim Morrison
-/
prelude
import Init.Data.Zero
/-!
# A monolithic commutative ring typeclass for internal use in `grind`.
-/
namespace Lean.Grind
class CommRing (α : Type u) extends Add α, Zero α, Mul α, One α, Neg α where
add_assoc : ∀ a b c : α, a + b + c = a + (b + c)
add_comm : ∀ a b : α, a + b = b + a
add_zero : ∀ a : α, a + 0 = a
neg_add_cancel : ∀ a : α, -a + a = 0
mul_assoc : ∀ a b c : α, a * b * c = a * (b * c)
mul_comm : ∀ a b : α, a * b = b * a
mul_one : ∀ a : α, a * 1 = a
left_distrib : ∀ a b c : α, a * (b + c) = a * b + a * c
zero_mul : ∀ a : α, 0 * a = 0
namespace CommRing
variable {α : Type u} [CommRing α]
theorem zero_add (a : α) : 0 + a = a := by
rw [add_comm, add_zero]
theorem add_neg_cancel (a : α) : a + -a = 0 := by
rw [add_comm, neg_add_cancel]
theorem one_mul (a : α) : 1 * a = a := by
rw [mul_comm, mul_one]
theorem right_distrib (a b c : α) : (a + b) * c = a * c + b * c := by
rw [mul_comm, left_distrib, mul_comm c, mul_comm c]
theorem mul_zero (a : α) : a * 0 = 0 := by
rw [mul_comm, zero_mul]
end CommRing
end Lean.Grind

View File

@@ -1,23 +0,0 @@
/-
Copyright (c) 2025 Lean FRO, LLC. or its affiliates. All Rights Reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kim Morrison
-/
prelude
import Init.Grind.CommRing.Basic
import Init.Data.BitVec.Lemmas
namespace Lean.Grind
instance : CommRing (BitVec w) where
add_assoc := BitVec.add_assoc
add_comm := BitVec.add_comm
add_zero := BitVec.add_zero
neg_add_cancel := BitVec.add_left_neg
mul_assoc := BitVec.mul_assoc
mul_comm := BitVec.mul_comm
mul_one := BitVec.mul_one
left_distrib _ _ _ := BitVec.mul_add
zero_mul _ := BitVec.zero_mul
end Lean.Grind

View File

@@ -1,23 +0,0 @@
/-
Copyright (c) 2025 Lean FRO, LLC. or its affiliates. All Rights Reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kim Morrison
-/
prelude
import Init.Grind.CommRing.Basic
import Init.Data.Int.Lemmas
namespace Lean.Grind
instance : CommRing Int where
add_assoc := Int.add_assoc
add_comm := Int.add_comm
add_zero := Int.add_zero
neg_add_cancel := Int.add_left_neg
mul_assoc := Int.mul_assoc
mul_comm := Int.mul_comm
mul_one := Int.mul_one
left_distrib := Int.mul_add
zero_mul := Int.zero_mul
end Lean.Grind

View File

@@ -1,67 +0,0 @@
/-
Copyright (c) 2025 Lean FRO, LLC. or its affiliates. All Rights Reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kim Morrison
-/
prelude
import Init.Grind.CommRing.Basic
import Init.Data.SInt.Lemmas
namespace Lean.Grind
instance : CommRing Int8 where
add_assoc := Int8.add_assoc
add_comm := Int8.add_comm
add_zero := Int8.add_zero
neg_add_cancel := Int8.add_left_neg
mul_assoc := Int8.mul_assoc
mul_comm := Int8.mul_comm
mul_one := Int8.mul_one
left_distrib _ _ _ := Int8.mul_add
zero_mul _ := Int8.zero_mul
instance : CommRing Int16 where
add_assoc := Int16.add_assoc
add_comm := Int16.add_comm
add_zero := Int16.add_zero
neg_add_cancel := Int16.add_left_neg
mul_assoc := Int16.mul_assoc
mul_comm := Int16.mul_comm
mul_one := Int16.mul_one
left_distrib _ _ _ := Int16.mul_add
zero_mul _ := Int16.zero_mul
instance : CommRing Int32 where
add_assoc := Int32.add_assoc
add_comm := Int32.add_comm
add_zero := Int32.add_zero
neg_add_cancel := Int32.add_left_neg
mul_assoc := Int32.mul_assoc
mul_comm := Int32.mul_comm
mul_one := Int32.mul_one
left_distrib _ _ _ := Int32.mul_add
zero_mul _ := Int32.zero_mul
instance : CommRing Int64 where
add_assoc := Int64.add_assoc
add_comm := Int64.add_comm
add_zero := Int64.add_zero
neg_add_cancel := Int64.add_left_neg
mul_assoc := Int64.mul_assoc
mul_comm := Int64.mul_comm
mul_one := Int64.mul_one
left_distrib _ _ _ := Int64.mul_add
zero_mul _ := Int64.zero_mul
instance : CommRing ISize where
add_assoc := ISize.add_assoc
add_comm := ISize.add_comm
add_zero := ISize.add_zero
neg_add_cancel := ISize.add_left_neg
mul_assoc := ISize.mul_assoc
mul_comm := ISize.mul_comm
mul_one := ISize.mul_one
left_distrib _ _ _ := ISize.mul_add
zero_mul _ := ISize.zero_mul
end Lean.Grind

View File

@@ -1,67 +0,0 @@
/-
Copyright (c) 2025 Lean FRO, LLC. or its affiliates. All Rights Reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kim Morrison
-/
prelude
import Init.Grind.CommRing.Basic
import Init.Data.UInt.Lemmas
namespace Lean.Grind
instance : CommRing UInt8 where
add_assoc := UInt8.add_assoc
add_comm := UInt8.add_comm
add_zero := UInt8.add_zero
neg_add_cancel := UInt8.add_left_neg
mul_assoc := UInt8.mul_assoc
mul_comm := UInt8.mul_comm
mul_one := UInt8.mul_one
left_distrib _ _ _ := UInt8.mul_add
zero_mul _ := UInt8.zero_mul
instance : CommRing UInt16 where
add_assoc := UInt16.add_assoc
add_comm := UInt16.add_comm
add_zero := UInt16.add_zero
neg_add_cancel := UInt16.add_left_neg
mul_assoc := UInt16.mul_assoc
mul_comm := UInt16.mul_comm
mul_one := UInt16.mul_one
left_distrib _ _ _ := UInt16.mul_add
zero_mul _ := UInt16.zero_mul
instance : CommRing UInt32 where
add_assoc := UInt32.add_assoc
add_comm := UInt32.add_comm
add_zero := UInt32.add_zero
neg_add_cancel := UInt32.add_left_neg
mul_assoc := UInt32.mul_assoc
mul_comm := UInt32.mul_comm
mul_one := UInt32.mul_one
left_distrib _ _ _ := UInt32.mul_add
zero_mul _ := UInt32.zero_mul
instance : CommRing UInt64 where
add_assoc := UInt64.add_assoc
add_comm := UInt64.add_comm
add_zero := UInt64.add_zero
neg_add_cancel := UInt64.add_left_neg
mul_assoc := UInt64.mul_assoc
mul_comm := UInt64.mul_comm
mul_one := UInt64.mul_one
left_distrib _ _ _ := UInt64.mul_add
zero_mul _ := UInt64.zero_mul
instance : CommRing USize where
add_assoc := USize.add_assoc
add_comm := USize.add_comm
add_zero := USize.add_zero
neg_add_cancel := USize.add_left_neg
mul_assoc := USize.mul_assoc
mul_comm := USize.mul_comm
mul_one := USize.mul_one
left_distrib _ _ _ := USize.mul_add
zero_mul _ := USize.zero_mul
end Lean.Grind

View File

@@ -18,9 +18,6 @@ theorem rfl_true : true = true :=
def intro_with_eq (p p' : Prop) (q : Sort u) (he : p = p') (h : p' q) : p q :=
fun hp => h (he.mp hp)
def intro_with_eq' (p p' : Prop) (q : p Sort u) (he : p = p') (h : (h : p') q (he.mpr_prop h)) : (h : p) q h :=
fun hp => h (he.mp hp)
/-! And -/
theorem and_eq_of_eq_true_left {a b : Prop} (h : a = True) : (a b) = b := by simp [h]
@@ -77,14 +74,6 @@ theorem eq_congr' {α : Sort u} {a₁ b₁ a₂ b₂ : α} (h₁ : a₁ = b₂)
theorem ne_of_ne_of_eq_left {α : Sort u} {a b c : α} (h₁ : a = b) (h₂ : b c) : a c := by simp [*]
theorem ne_of_ne_of_eq_right {α : Sort u} {a b c : α} (h₁ : a = c) (h₂ : b c) : b a := by simp [*]
/-! BEq -/
theorem beq_eq_true_of_eq {α : Type u} {_ : BEq α} {_ : LawfulBEq α} {a b : α} (h : a = b) : (a == b) = true := by
simp[*]
theorem beq_eq_false_of_diseq {α : Type u} {_ : BEq α} {_ : LawfulBEq α} {a b : α} (h : ¬ a = b) : (a == b) = false := by
simp[*]
/-! Bool.and -/
theorem Bool.and_eq_of_eq_true_left {a b : Bool} (h : a = true) : (a && b) = b := by simp [h]
@@ -113,9 +102,6 @@ theorem Bool.not_eq_of_eq_false {a : Bool} (h : a = false) : (!a) = true := by s
theorem Bool.eq_false_of_not_eq_true {a : Bool} (h : (!a) = true) : a = false := by simp_all
theorem Bool.eq_true_of_not_eq_false {a : Bool} (h : (!a) = false) : a = true := by simp_all
theorem Bool.eq_false_of_not_eq_true' {a : Bool} (h : ¬ a = true) : a = false := by simp_all
theorem Bool.eq_true_of_not_eq_false' {a : Bool} (h : ¬ a = false) : a = true := by simp_all
theorem Bool.false_of_not_eq_self {a : Bool} (h : (!a) = a) : False := by
by_cases a <;> simp_all

View File

@@ -106,7 +106,6 @@ init_grind_norm
or_true true_or or_false false_or or_assoc
-- ite
ite_true ite_false ite_true_false ite_false_true
dite_eq_ite
-- Forall
forall_and
-- Exists
@@ -120,7 +119,7 @@ init_grind_norm
-- Bool not
Bool.not_not
-- beq
beq_iff_eq beq_eq_decide_eq beq_self_eq_true
beq_iff_eq beq_eq_decide_eq
-- bne
bne_iff_ne bne_eq_decide_not_eq
-- Bool not eq true/false

View File

@@ -10,7 +10,7 @@ namespace Lean.Parser
/--
Reset all `grind` attributes. This command is intended for testing purposes only and should not be used in applications.
-/
syntax (name := resetGrindAttrs) "reset_grind_attrs%" : command
syntax (name := resetGrindAttrs) "%reset_grind_attrs" : command
namespace Attr
syntax grindEq := "= "
@@ -79,24 +79,6 @@ structure Config where
See paper "Model-based Theory Combination" for details.
-/
mbtc : Bool := true
/--
When set to `true` (default: `true`), local definitions are unfolded during normalization and internalization.
In other words, given a local context with an entry `x : t := e`, the free variable `x` is reduced to `e`.
Note that this behavior is also available in `simp`, but there its default is `false` because `simp` is not
always used as a terminal tactic, and it important to preserve the abstractions introduced by users.
Additionally, in `grind` we observed that `zetaDelta` is particularly important when combined with function induction.
In such scenarios, the same let-expressions can be introduced by function induction and also by unfolding the
corresponding definition. We want to avoid a situation in which `zetaDelta` is not applied to let-declarations
introduced by function induction while `zeta` unfolds the definition, causing a mismatch.
Finally, note that congruence closure is less effective on terms containing many binders such as
`lambda` and `let` expressions.
-/
zetaDelta := true
/--
When `true` (default: `true`), performs zeta reduction of let expressions during normalization.
That is, `let x := v; e[x]` reduces to `e[v]`. See also `zetaDelta`.
-/
zeta := true
deriving Inhabited, BEq
end Lean.Grind

View File

@@ -78,19 +78,6 @@ def offsetUnexpander : PrettyPrinter.Unexpander := fun stx => do
| `($_ $lhs:term $rhs:term) => `($lhs + $rhs)
| _ => throw ()
/-
Remark: `↑a` is notation for `Nat.cast a`. `Nat.cast` is an abbreviation
for `NatCast.natCast`. We added it because users wanted to use dot-notation (e.g., `a.cast`).
`grind` expands all reducible definitions. Thus, a `grind` failure state contains
many `NatCast.natCast` applications which is too verbose. We add the following
unexpander to cope with this issue.
-/
@[app_unexpander NatCast.natCast]
def natCastUnexpander : PrettyPrinter.Unexpander := fun stx => do
match stx with
| `($_ $a:term) => `($a)
| _ => throw ()
/--
A marker to indicate that a proposition has already been normalized and should not
be processed again.

View File

@@ -35,11 +35,8 @@ class PartialOrder (α : Sort u) where
This is intended to be used in the construction of `partial_fixpoint`, and not meant to be used otherwise.
-/
rel : α α Prop
/-- The “less-or-equal-to” or “approximates” relation is reflexive. -/
rel_refl : {x}, rel x x
/-- The “less-or-equal-to” or “approximates” relation is transitive. -/
rel_trans : {x y z}, rel x y rel y z rel x z
/-- The “less-or-equal-to” or “approximates” relation is antisymmetric. -/
rel_antisymm : {x y}, rel x y rel y x x = y
@[inherit_doc] scoped infix:50 "" => PartialOrder.rel
@@ -64,21 +61,15 @@ section CCPO
/--
A chain-complete partial order (CCPO) is a partial order where every chain has a least upper bound.
This is intended to be used in the construction of `partial_fixpoint`, and not meant to be used
otherwise.
This is intended to be used in the construction of `partial_fixpoint`, and not meant to be used otherwise.
-/
class CCPO (α : Sort u) extends PartialOrder α where
/--
The least upper bound of a chain.
This is intended to be used in the construction of `partial_fixpoint`, and not meant to be used
otherwise.
This is intended to be used in the construction of `partial_fixpoint`, and not meant to be used otherwise.
-/
csup : (α Prop) α
/--
`csup c` is the least upper bound of the chain `c` when all elements `x` that are at
least as large as `csup c` are at least as large as all elements of `c`, and vice versa.
-/
csup_spec {c : α Prop} (hc : chain c) : csup c x ( y, c y y x)
open PartialOrder CCPO
@@ -488,6 +479,7 @@ instance instCCPOPProd [CCPO α] [CCPO β] : CCPO (α ×' β) where
csup c := CCPO.csup (PProd.chain.fst c), CCPO.csup (PProd.chain.snd c)
csup_spec := by
intro a, b c hchain
dsimp
constructor
next =>
intro h₁, h₂ a', b' cab

View File

@@ -1484,18 +1484,12 @@ structure ApplyConfig where
namespace Rewrite
@[inherit_doc ApplyNewGoals]
abbrev NewGoals := ApplyNewGoals
/-- Configures the behavior of the `rewrite` and `rw` tactics. -/
structure Config where
/-- The transparency mode to use for unfolding -/
transparency : TransparencyMode := .reducible
/-- Whether to support offset constraints such as `?x + 1 =?= e` -/
offsetCnstrs : Bool := true
/-- Which occurrences to rewrite-/
occs : Occurrences := .all
/-- How to convert the resulting metavariables into new goals -/
newGoals : NewGoals := .nonDependentFirst
end Rewrite

View File

@@ -20,21 +20,17 @@ structure Module where
namespace Meta
/--
Which constants should be unfolded?
-/
inductive TransparencyMode where
/-- Unfolds all constants, even those tagged as `@[irreducible]`. -/
/-- unfold all constants, even those tagged as `@[irreducible]`. -/
| all
/-- Unfolds all constants except those tagged as `@[irreducible]`. -/
/-- unfold all constants except those tagged as `@[irreducible]`. -/
| default
/-- Unfolds only constants tagged with the `@[reducible]` attribute. -/
/-- unfold only constants tagged with the `@[reducible]` attribute. -/
| reducible
/-- Unfolds reducible constants and constants tagged with the `@[instance]` attribute. -/
/-- unfold reducible constants and constants tagged with the `@[instance]` attribute. -/
| instances
deriving Inhabited, BEq
/-- Which structure types should eta be used with? -/
inductive EtaStructMode where
/-- Enable eta for structure and classes. -/
| all

View File

@@ -1415,11 +1415,6 @@ class Zero (α : Type u) where
/-- The zero element of the type. -/
zero : α
/-- A type with a "one" element. -/
class One (α : Type u) where
/-- The "one" element of the type. -/
one : α
/-- The homogeneous version of `HAdd`: `a + b : α` where `a b : α`. -/
class Add (α : Type u) where
/-- `a + b` computes the sum of `a` and `b`. See `HAdd`. -/
@@ -2035,9 +2030,7 @@ structure BitVec (w : Nat) where
toFin : Fin (hPow 2 w)
/--
Bitvectors have decidable equality.
This should be used via the instance `DecidableEq (BitVec n)`.
Bitvectors have decidable equality. This should be used via the instance `DecidableEq (BitVec n)`.
-/
-- We manually derive the `DecidableEq` instances for `BitVec` because
-- we want to have builtin support for bit-vector literals, and we
@@ -2056,11 +2049,8 @@ instance : DecidableEq (BitVec n) := BitVec.decEq
protected def BitVec.ofNatLT {n : Nat} (i : Nat) (p : LT.lt i (hPow 2 n)) : BitVec n where
toFin := i, p
/--
Return the underlying `Nat` that represents a bitvector.
This is O(1) because `BitVec` is a (zero-cost) wrapper around a `Nat`.
-/
/-- Given a bitvector `x`, return the underlying `Nat`. This is O(1) because `BitVec` is a
(zero-cost) wrapper around a `Nat`. -/
protected def BitVec.toNat (x : BitVec n) : Nat := x.toFin.val
instance : LT (BitVec n) where lt := (LT.lt ·.toNat ·.toNat)

View File

@@ -334,13 +334,6 @@ theorem not_forall_of_exists_not {p : α → Prop} : (∃ x, ¬p x) → ¬∀ x,
@[simp] theorem exists_eq_right' : ( a, p a a' = a) p a' := by simp [@eq_comm _ a']
@[simp] theorem exists_prop_eq {p : (a : α) a = a' Prop} :
( (a : α) (h : a = a'), p a h) p a' rfl :=
fun _, e, h => e h, fun h => _, rfl, h
@[simp] theorem exists_prop_eq' {p : (a : α) a' = a Prop} :
( (a : α) (h : a' = a), p a h) p a' rfl := by simp [@eq_comm _ a']
@[simp] theorem forall_eq_or_imp : ( a, a = a' q a p a) p a' a, q a p a := by
simp only [or_imp, forall_and, forall_eq]

View File

@@ -17,7 +17,6 @@ Paths consist of a sequence of directories followed by the name of a file or dir
delimited by a platform-dependent separator character (see `System.FilePath.pathSeparator`).
-/
structure FilePath where
/-- The string representation of the path. -/
toString : String
deriving Inhabited, DecidableEq, Hashable

View File

@@ -1367,7 +1367,8 @@ A child process that was spawned with configuration `cfg`.
The configuration determines whether the child process's standard input, standard output, and
standard error are `IO.FS.Handle`s or `Unit`.
-/
structure Child (cfg : StdioConfig) where private mk ::
-- TODO(Sebastian): constructor must be private
structure Child (cfg : StdioConfig) where
/--
The child process's standard input handle, if it was configured as `IO.Process.Stdio.piped`, or
`()` otherwise.
@@ -1427,9 +1428,6 @@ standard input is exhausted.
@[extern "lean_io_process_child_take_stdin"] opaque Child.takeStdin {cfg : @& StdioConfig} : Child cfg
IO (cfg.stdin.toHandleType × Child { cfg with stdin := Stdio.null })
/-- Returns the operating system process id of the child process. -/
@[extern "lean_io_process_child_pid"] opaque Child.pid {cfg : @& StdioConfig} : Child cfg UInt32
/--
The result of running a process to completion.
-/
@@ -1484,11 +1482,8 @@ The `FileRight` structure describes these permissions for a file's owner, member
group, and all others.
-/
structure AccessRight where
/-- The file can be read. -/
read : Bool := false
/-- The file can be written to. -/
write : Bool := false
/-- The file can be executed. -/
execution : Bool := false
/--

View File

@@ -42,19 +42,12 @@ def dbgSleep {α : Type u} (ms : UInt32) (f : Unit → α) : α := f ()
@[never_extract, inline] def panicWithPosWithDecl {α : Sort u} [Inhabited α] (modName : String) (declName : String) (line col : Nat) (msg : String) : α :=
panic (mkPanicMessageWithDecl modName declName line col msg)
/--
Returns the address at which an object is allocated.
This function is unsafe because it can distinguish between definitionally equal values.
-/
@[extern "lean_ptr_addr"]
unsafe opaque ptrAddrUnsafe {α : Type u} (a : @& α) : USize
/--
Returns `true` if `a` is an exclusive object.
An object is exclusive if it is single-threaded and its reference counter is 1. This function is
unsafe because it can distinguish between definitionally equal values.
We say an object is exclusive if it is single-threaded and its reference counter is 1.
-/
@[extern "lean_is_exclusive_obj"]
unsafe opaque isExclusiveUnsafe {α : Type u} (a : @& α) : Bool
@@ -63,21 +56,8 @@ set_option linter.unusedVariables.funArgs false in
@[inline] unsafe def withPtrAddrUnsafe {α : Type u} {β : Type v} (a : α) (k : USize β) (h : u₁ u₂, k u₁ = k u₂) : β :=
k (ptrAddrUnsafe a)
/--
Compares two objects for pointer equality.
Two objects are pointer-equal if, at runtime, they are allocated at exactly the same address. This
function is unsafe because it can distinguish between definitionally equal values.
-/
@[inline] unsafe def ptrEq (a b : α) : Bool := ptrAddrUnsafe a == ptrAddrUnsafe b
/--
Compares two lists of objects for element-wise pointer equality. Returns `true` if both lists are
the same length and the objects at the corresponding indices of each list are pointer-equal.
Two objects are pointer-equal if, at runtime, they are allocated at exactly the same address. This
function is unsafe because it can distinguish between definitionally equal values.
-/
unsafe def ptrEqList : (as bs : List α) Bool
| [], [] => true
| a::as, b::bs => if ptrEq a b then ptrEqList as bs else false

View File

@@ -40,4 +40,3 @@ import Lean.Replay
import Lean.PrivateName
import Lean.PremiseSelection
import Lean.Namespace
import Lean.EnvExtension

View File

@@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Lean.EnvExtension
import Lean.Environment
namespace Lean

View File

@@ -14,32 +14,29 @@ namespace Lean.Compiler.FFI
@[extern "lean_get_leanc_extra_flags"]
private opaque getLeancExtraFlags : Unit String
private def flagsStringToArray (s : String) : Array String :=
s.splitOn.toArray |>.filter (· "")
/-- Return C compiler flags for including Lean's headers. -/
def getCFlags (leanSysroot : FilePath) : Array String :=
#["-I", (leanSysroot / "include").toString] ++ flagsStringToArray (getLeancExtraFlags ())
#["-I", (leanSysroot / "include").toString] ++ (getLeancExtraFlags ()).trim.splitOn
@[extern "lean_get_leanc_internal_flags"]
private opaque getLeancInternalFlags : Unit String
/-- Return C compiler flags needed to use the C compiler bundled with the Lean toolchain. -/
def getInternalCFlags (leanSysroot : FilePath) : Array String :=
flagsStringToArray (getLeancInternalFlags ()) |>.map (·.replace "ROOT" leanSysroot.toString)
(getLeancInternalFlags ()).trim.splitOn.toArray.map (·.replace "ROOT" leanSysroot.toString)
@[extern "lean_get_linker_flags"]
private opaque getBuiltinLinkerFlags (linkStatic : Bool) : String
/-- Return linker flags for linking against Lean's libraries. -/
def getLinkerFlags (leanSysroot : FilePath) (linkStatic := true) : Array String :=
#["-L", (leanSysroot / "lib" / "lean").toString] ++ flagsStringToArray (getBuiltinLinkerFlags linkStatic)
#["-L", (leanSysroot / "lib" / "lean").toString] ++ (getBuiltinLinkerFlags linkStatic).trim.splitOn
@[extern "lean_get_internal_linker_flags"]
private opaque getBuiltinInternalLinkerFlags : Unit String
/-- Return linker flags needed to use the linker bundled with the Lean toolchain. -/
def getInternalLinkerFlags (leanSysroot : FilePath) : Array String :=
flagsStringToArray (getBuiltinInternalLinkerFlags ()) |>.map (·.replace "ROOT" leanSysroot.toString)
(getBuiltinInternalLinkerFlags ()).trim.splitOn.toArray.map (·.replace "ROOT" leanSysroot.toString)
end Lean.Compiler.FFI

View File

@@ -180,7 +180,7 @@ structure CtorInfo where
size : Nat
usize : Nat
ssize : Nat
deriving Inhabited, Repr
deriving Repr
def CtorInfo.beq : CtorInfo CtorInfo Bool
| n₁, cidx₁, size₁, usize₁, ssize₁, n₂, cidx₂, size₂, usize₂, ssize₂ =>
@@ -223,7 +223,6 @@ inductive Expr where
| lit (v : LitVal)
/-- Return `1 : uint8` Iff `RC(x) > 1` -/
| isShared (x : VarId)
deriving Inhabited
@[export lean_ir_mk_ctor_expr] def mkCtorExpr (n : Name) (cidx : Nat) (size : Nat) (usize : Nat) (ssize : Nat) (ys : Array Arg) : Expr :=
Expr.ctor n, cidx, size, usize, ssize ys

View File

@@ -15,7 +15,6 @@ inductive CtorFieldInfo where
| object (i : Nat)
| usize (i : Nat)
| scalar (sz : Nat) (offset : Nat) (type : IRType)
deriving Inhabited
namespace CtorFieldInfo

View File

@@ -10,7 +10,10 @@ import Lean.Compiler.LCNF.Internalize
namespace Lean.Compiler.LCNF
builtin_initialize auxDeclCacheExt : CacheExtension Decl Name CacheExtension.register
abbrev AuxDeclCache := PHashMap Decl Name
builtin_initialize auxDeclCacheExt : EnvExtension AuxDeclCache
registerEnvExtension (pure {}) (asyncMode := .sync) -- compilation is non-parallel anyway
inductive CacheAuxDeclResult where
| new
@@ -19,11 +22,11 @@ inductive CacheAuxDeclResult where
def cacheAuxDecl (decl : Decl) : CompilerM CacheAuxDeclResult := do
let key := { decl with name := .anonymous }
let key normalizeFVarIds key
match ( auxDeclCacheExt.find? key) with
match auxDeclCacheExt.getState ( getEnv) |>.find? key with
| some declName =>
return .alreadyCached declName
| none =>
auxDeclCacheExt.insert key decl.name
modifyEnv fun env => auxDeclCacheExt.modifyState env fun s => s.insert key decl.name
return .new
end Lean.Compiler.LCNF

View File

@@ -14,15 +14,21 @@ State for the environment extension used to save the LCNF base phase type for de
that do not have code associated with them.
Example: constructors, inductive types, foreign functions.
-/
builtin_initialize baseTypeExt : CacheExtension Name Expr CacheExtension.register
structure BaseTypeExtState where
/-- The LCNF type for the `base` phase. -/
base : PHashMap Name Expr := {}
deriving Inhabited
builtin_initialize baseTypeExt : EnvExtension BaseTypeExtState
registerEnvExtension (pure {}) (asyncMode := .sync) -- compilation is non-parallel anyway
def getOtherDeclBaseType (declName : Name) (us : List Level) : CoreM Expr := do
let info getConstInfo declName
let type match ( baseTypeExt.find? declName) with
let type match baseTypeExt.getState ( getEnv) |>.base.find? declName with
| some type => pure type
| none =>
let type Meta.MetaM.run' <| toLCNFType info.type
baseTypeExt.insert declName type
modifyEnv fun env => baseTypeExt.modifyState env fun s => { s with base := s.base.insert declName type }
pure type
return type.instantiateLevelParamsNoCache info.levelParams us

View File

@@ -483,26 +483,4 @@ def getConfig : CompilerM ConfigOptions :=
def CompilerM.run (x : CompilerM α) (s : State := {}) (phase : Phase := .base) : CoreM α := do
x { phase, config := toConfigOptions ( getOptions) } |>.run' s
/-- Environment extension for local caching of key-value pairs, not persisted in .olean files. -/
structure CacheExtension (α β : Type) [BEq α] [Hashable α] extends EnvExtension (List α × PHashMap α β)
deriving Inhabited
namespace CacheExtension
def register [BEq α] [Hashable α] [Inhabited β] :
IO (CacheExtension α β) :=
CacheExtension.mk <$> registerEnvExtension (pure ([], {})) (asyncMode := .sync) -- compilation is non-parallel anyway
(replay? := some fun oldState newState _ s =>
let newEntries := newState.1.take (newState.1.length - oldState.1.length)
newEntries.foldl (init := s) fun s e =>
(e :: s.1, s.2.insert e (newState.2.find! e)))
def insert [BEq α] [Hashable α] [Inhabited β] (ext : CacheExtension α β) (a : α) (b : β) : CoreM Unit := do
modifyEnv (ext.modifyState · fun as, m => (a :: as, m.insert a b))
def find? [BEq α] [Hashable α] [Inhabited β] (ext : CacheExtension α β) (a : α) : CoreM (Option β) := do
return ext.toEnvExtension.getState ( getEnv) |>.2.find? a
end CacheExtension
end Lean.Compiler.LCNF

View File

@@ -249,7 +249,6 @@ builtin_initialize functionSummariesExt : SimplePersistentEnvExtension (Name ×
addEntryFn := fun s e, n => s.insert e n
toArrayFn := fun s => s.toArray.qsort decLt
asyncMode := .sync -- compilation is non-parallel anyway
replay? := some <| SimplePersistentEnvExtension.replayOfFilter (!·.contains ·.1) (fun s e, n => s.insert e n)
}
/--

View File

@@ -111,14 +111,20 @@ State for the environment extension used to save the LCNF mono phase type for de
that do not have code associated with them.
Example: constructors, inductive types, foreign functions.
-/
builtin_initialize monoTypeExt : CacheExtension Name Expr CacheExtension.register
structure MonoTypeExtState where
/-- The LCNF type for the `mono` phase. -/
mono : PHashMap Name Expr := {}
deriving Inhabited
builtin_initialize monoTypeExt : EnvExtension MonoTypeExtState
registerEnvExtension (pure {}) (asyncMode := .sync) -- compilation is non-parallel anyway
def getOtherDeclMonoType (declName : Name) : CoreM Expr := do
match ( monoTypeExt.find? declName) with
match monoTypeExt.getState ( getEnv) |>.mono.find? declName with
| some type => return type
| none =>
let type toMonoType ( getOtherDeclBaseType declName [])
monoTypeExt.insert declName type
modifyEnv fun env => monoTypeExt.modifyState env fun s => { s with mono := s.mono.insert declName type }
return type
end Lean.Compiler.LCNF

View File

@@ -94,6 +94,7 @@ builtin_initialize passManagerExt : PersistentEnvExtension Name (Name × PassMan
addImportedFn := fun ns => return ([], ImportM.runCoreM <| runImportedDecls ns)
addEntryFn := fun (installerDeclNames, _) (installerDeclName, managerNew) => (installerDeclName :: installerDeclNames, managerNew)
exportEntriesFn := fun s => s.1.reverse.toArray
asyncMode := .sync
}
def getPassManager : CoreM PassManager :=

View File

@@ -21,21 +21,22 @@ private abbrev findAtSorted? (decls : Array Decl) (declName : Name) : Option Dec
let tmpDecl := { tmpDecl with name := declName }
decls.binSearch tmpDecl declLt
abbrev DeclExt := SimplePersistentEnvExtension Decl DeclExtState
abbrev DeclExt := PersistentEnvExtension Decl Decl DeclExtState
def mkDeclExt (name : Name := by exact decl_name%) : IO DeclExt := do
registerSimplePersistentEnvExtension {
registerPersistentEnvExtension {
name := name
addImportedFn := fun _ => {}
mkInitial := return {}
addImportedFn := fun _ => return {}
addEntryFn := fun decls decl => decls.insert decl.name decl
toArrayFn := (sortDecls ·.toArray)
exportEntriesFn := fun s =>
let decls := s.foldl (init := #[]) fun decls _ decl => decls.push decl
sortDecls decls
asyncMode := .sync -- compilation is non-parallel anyway
replay? := some <| SimplePersistentEnvExtension.replayOfFilter
(fun s d => !s.contains d.name) (fun decls decl => decls.insert decl.name decl)
}
builtin_initialize baseExt : DeclExt mkDeclExt
builtin_initialize monoExt : DeclExt mkDeclExt
builtin_initialize baseExt : PersistentEnvExtension Decl Decl DeclExtState mkDeclExt
builtin_initialize monoExt : PersistentEnvExtension Decl Decl DeclExtState mkDeclExt
def getDeclCore? (env : Environment) (ext : DeclExt) (declName : Name) : Option Decl :=
match env.getModuleIdxFor? declName with

View File

@@ -397,7 +397,7 @@ structure FolderOleanEntry where
structure FolderEntry extends FolderOleanEntry where
folder : Folder
builtin_initialize folderExt : PersistentEnvExtension FolderOleanEntry FolderEntry (List FolderEntry × SMap Name Folder)
builtin_initialize folderExt : PersistentEnvExtension FolderOleanEntry FolderEntry (List FolderOleanEntry × SMap Name Folder)
registerPersistentEnvExtension {
mkInitial := return ([], builtinFolders)
addImportedFn := fun entriesArray => do
@@ -408,12 +408,9 @@ builtin_initialize folderExt : PersistentEnvExtension FolderOleanEntry FolderEnt
let folder IO.ofExcept <| getFolderCore ctx.env ctx.opts folderDeclName
folders := folders.insert declName folder
return ([], folders.switch)
addEntryFn := fun (entries, map) entry => (entry :: entries, map.insert entry.declName entry.folder)
exportEntriesFn := fun (entries, _) => entries.reverse.toArray.map (·.toFolderOleanEntry)
addEntryFn := fun (entries, map) entry => (entry.toFolderOleanEntry :: entries, map.insert entry.declName entry.folder)
exportEntriesFn := fun (entries, _) => entries.reverse.toArray
asyncMode := .sync
replay? := some fun oldState newState _ s =>
let newEntries := newState.1.take (newState.1.length - oldState.1.length)
(newEntries ++ s.1, newEntries.foldl (init := s.2) fun s e => s.insert e.declName (newState.2.find! e.declName))
}
def registerFolder (declName : Name) (folderDeclName : Name) : CoreM Unit := do

View File

@@ -86,8 +86,6 @@ builtin_initialize specExtension : SimplePersistentEnvExtension SpecEntry SpecSt
addImportedFn := fun _ => {}
toArrayFn := fun s => sortEntries s.toArray
asyncMode := .sync
replay? := some <| SimplePersistentEnvExtension.replayOfFilter
(!·.specInfo.contains ·.declName) SpecState.addEntry
}
/--

View File

@@ -33,8 +33,6 @@ builtin_initialize specCacheExt : SimplePersistentEnvExtension CacheEntry Cache
addEntryFn := addEntry
addImportedFn := fun es => (mkStateFromImportedEntries addEntry {} es).switch
asyncMode := .sync
replay? := some <| SimplePersistentEnvExtension.replayOfFilter
(!·.contains ·.key) addEntry
}
def cacheSpec (key : Expr) (declName : Name) : CoreM Unit :=

View File

@@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Lean.EnvExtension
import Lean.Environment
namespace Lean

View File

@@ -145,12 +145,34 @@ structure DiagnosticWith (α : Type) where
/-- An array of related diagnostic information, e.g. when symbol-names within a scope collide all definitions can be marked via this property. -/
relatedInformation? : Option (Array DiagnosticRelatedInformation) := none
/-- A data entry field that is preserved between a `textDocument/publishDiagnostics` notification and `textDocument/codeAction` request. -/
data? : Option Json := none
data?: Option Json := none
deriving Inhabited, BEq, ToJson, FromJson
def DiagnosticWith.fullRange (d : DiagnosticWith α) : Range :=
d.fullRange?.getD d.range
attribute [local instance] Ord.arrayOrd in
/-- Restriction of `DiagnosticWith` to properties that are displayed to users in the InfoView. -/
private structure DiagnosticWith.UserVisible (α : Type) where
range : Range
fullRange? : Option Range
severity? : Option DiagnosticSeverity
code? : Option DiagnosticCode
source? : Option String
message : α
tags? : Option (Array DiagnosticTag)
relatedInformation? : Option (Array DiagnosticRelatedInformation)
deriving Ord
/-- Extracts user-visible properties from the given `DiagnosticWith`. -/
private def DiagnosticWith.UserVisible.ofDiagnostic (d : DiagnosticWith α)
: DiagnosticWith.UserVisible α :=
{ d with }
/-- Compares `DiagnosticWith` instances modulo non-user-facing properties. -/
def compareByUserVisible [Ord α] (a b : DiagnosticWith α) : Ordering :=
compare (DiagnosticWith.UserVisible.ofDiagnostic a) (DiagnosticWith.UserVisible.ofDiagnostic b)
abbrev Diagnostic := DiagnosticWith String
/-- Parameters for the [`textDocument/publishDiagnostics` notification](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_publishDiagnostics). -/

View File

@@ -7,7 +7,6 @@ Authors: Joscha Mennicken
prelude
import Lean.Expr
import Lean.Data.Lsp.Basic
import Lean.Data.JsonRpc
import Std.Data.TreeMap
set_option linter.missingDocs true -- keep it documented
@@ -202,62 +201,4 @@ structure LeanStaleDependencyParams where
staleDependency : DocumentUri
deriving FromJson, ToJson
/-- LSP type for `Lean.OpenDecl`. -/
inductive OpenNamespace
/-- All declarations in `«namespace»` are opened, except for `exceptions`. -/
| allExcept («namespace» : Name) (exceptions : Array Name)
/-- The declaration `«from»` is renamed to `to`. -/
| renamed («from» : Name) (to : Name)
deriving FromJson, ToJson
/-- Query in the `$/lean/queryModule` watchdog <- worker request. -/
structure LeanModuleQuery where
/-- Identifier (potentially partial) to query. -/
identifier : String
/--
Namespaces that are open at the position of `identifier`.
Used for accurately matching declarations against `identifier` in context.
-/
openNamespaces : Array OpenNamespace
deriving FromJson, ToJson
/--
Used in the `$/lean/queryModule` watchdog <- worker request, which is used by the worker to
extract information from the .ilean information in the watchdog.
-/
structure LeanQueryModuleParams where
/--
The request ID in the context of which this worker -> watchdog request was emitted.
Used for cancelling this request in the watchdog.
-/
sourceRequestID : JsonRpc.RequestID
/-- Module queries for extracting .ilean information in the watchdog. -/
queries : Array LeanModuleQuery
deriving FromJson, ToJson
/-- Result entry of a module query. -/
structure LeanIdentifier where
/-- Module that `decl` is defined in. -/
module : Name
/-- Full name of the declaration that matches the query. -/
decl : Name
/-- Whether this `decl` matched the query exactly. -/
isExactMatch : Bool
deriving FromJson, ToJson
/--
Result for a single module query.
Identifiers in the response are sorted descendingly by how well they match the query.
-/
abbrev LeanQueriedModule := Array LeanIdentifier
/-- Response for the `$/lean/queryModule` watchdog <- worker request. -/
structure LeanQueryModuleResponse where
/--
Results for each query in `LeanQueryModuleParams`.
Positions correspond to `queries` in the parameter of the request.
-/
queryResults : Array LeanQueriedModule
deriving FromJson, ToJson, Inhabited
end Lean.Lsp

View File

@@ -11,8 +11,11 @@ set_option linter.missingDocs true
namespace Lean
@[extern "lean_manual_get_root"]
/- After a stage0 update:
@[extern "lean_get_manual_root"]
private opaque getManualRoot : Unit → String
-/
private def getManualRoot : Unit String := fun () => ""
private def fallbackManualRoot := "https://lean-lang.org/doc/reference/latest/"

View File

@@ -1222,8 +1222,8 @@ private def resolveLValAux (e : Expr) (eType : Expr) (lval : LVal) : TermElabM L
-- Then search the environment
if let some (baseStructName, fullName) findMethod? structName (.mkSimple fieldName) then
return LValResolution.const baseStructName structName fullName
let msg := mkUnknownIdentifierMessage m!"invalid field '{fieldName}', the environment does not contain '{Name.mkStr structName fieldName}'"
throwLValError e eType msg
throwLValError e eType
m!"invalid field '{fieldName}', the environment does not contain '{Name.mkStr structName fieldName}'"
| none, LVal.fieldName _ _ (some suffix) _ =>
if e.isConst then
throwUnknownConstant (e.constName! ++ suffix)
@@ -1502,7 +1502,7 @@ where
else if let some (fvar, []) resolveLocalName idNew then
return fvar
else
throwUnknownIdentifier m!"invalid dotted identifier notation, unknown identifier `{idNew}` from expected type{indentExpr expectedType}"
throwError "invalid dotted identifier notation, unknown identifier `{idNew}` from expected type{indentExpr expectedType}"
catch
| ex@(.error ..) =>
match ( unfoldDefinition? resultType) with
@@ -1550,7 +1550,7 @@ private partial def elabAppFn (f : Syntax) (lvals : List LVal) (namedArgs : Arra
| `(@$_) => throwUnsupportedSyntax -- invalid occurrence of `@`
| `(_) => throwError "placeholders '_' cannot be used where a function is expected"
| `(.$id:ident) =>
addCompletionInfo <| CompletionInfo.dotId id id.getId ( getLCtx) expectedType?
addCompletionInfo <| CompletionInfo.dotId f id.getId ( getLCtx) expectedType?
let fConst resolveDotName id expectedType?
let s observing do
-- Use (force := true) because we want to record the result of .ident resolution even in patterns

View File

@@ -22,8 +22,7 @@ def processHeader (header : Syntax) (opts : Options) (messages : MessageLog)
(plugins : Array System.FilePath := #[]) (leakEnv := false)
: IO (Environment × MessageLog) := do
try
let env
importModules (leakEnv := leakEnv) (loadExts := true) (headerToImports header) opts trustLevel plugins
let env importModules (leakEnv := leakEnv) (headerToImports header) opts trustLevel plugins
pure (env, messages)
catch e =>
let env mkEmptyEnvironment

View File

@@ -1162,9 +1162,6 @@ private def logGoalsAccomplishedSnapshotTask (views : Array DefView)
-- Skip 'goals accomplished' task if we are on the command line.
-- These messages are only used in the language server.
return
-- make sure we don't accidentally keep any nested promises alive that would otherwise
-- auto-resolve to `none`
let views := views.map fun view => (view.ref, view.kind)
let currentLog Core.getMessageLog
let snaps := #[SnapshotTask.finished none (toSnapshotTree defsParsedSnap)] ++
( getThe Core.State).snapshotTasks
@@ -1177,11 +1174,11 @@ private def logGoalsAccomplishedSnapshotTask (views : Array DefView)
msg.severity matches .error || msg.data.hasTag (· == `hasSorry)
if hasErrorOrSorry then
return
for d in defsParsedSnap.defs, (ref, kind) in views do
for d in defsParsedSnap.defs, view in views do
let logGoalsAccomplished :=
let msgData := .tagged `goalsAccomplished m!"Goals accomplished!"
logAt ref msgData (severity := .information) (isSilent := true)
match kind with
logAt view.ref msgData (severity := .information) (isSilent := true)
match view.kind with
| .theorem =>
logGoalsAccomplished
| .example =>

View File

@@ -121,12 +121,6 @@ structure ElabHeaderResult extends PreElabHeaderResult where
indFVar : Expr
deriving Inhabited
/-- An intermediate step for mutual inductive elaboration. See `InductiveElabDescr` -/
structure InductiveElabStep3 where
/-- Finalize the inductive type, after they are all added to the environment, after auxiliary definitions are added, and after computed fields are registered.
The `levelParams`, `params`, and `replaceIndFVars` arguments of `prefinalize` are still valid here. -/
finalize : TermElabM Unit := pure ()
/-- An intermediate step for mutual inductive elaboration. See `InductiveElabDescr`. -/
structure InductiveElabStep2 where
/-- The constructors produced by `InductiveElabStep1`. -/
@@ -139,7 +133,9 @@ structure InductiveElabStep2 where
/-- Step to finalize term elaboration, done immediately after universe level processing is complete. -/
finalizeTermElab : TermElabM Unit := pure ()
/-- Like `finalize`, but occurs before `afterTypeChecking` attributes. -/
prefinalize (levelParams : List Name) (params : Array Expr) (replaceIndFVars : Expr MetaM Expr) : TermElabM InductiveElabStep3 := fun _ _ _ => pure {}
prefinalize (levelParams : List Name) (params : Array Expr) (replaceIndFVars : Expr MetaM Expr) : TermElabM Unit := fun _ _ _ => pure ()
/-- Finalize the inductive type, after they are all added to the environment, after auxiliary definitions are added, and after computed fields are registered. -/
finalize (levelParams : List Name) (params : Array Expr) (replaceIndFVars : Expr MetaM Expr) : TermElabM Unit := fun _ _ _ => pure ()
deriving Inhabited
/-- An intermediate step for mutual inductive elaboration. See `InductiveElabDescr`. -/
@@ -164,7 +160,7 @@ Elaboration occurs in the following steps:
- Elaboration of constructors is finalized, with additional tasks done by each `InductiveStep2.collectUniverses`.
- The inductive family is added to the environment and is checked by the kernel.
- Attributes and other finalization activities are performed, including those defined
by `InductiveStep2.prefinalize` and `InductiveStep3.finalize`.
by `InductiveStep2.prefinalize` and `InductiveStep2.finalize`.
-/
structure InductiveElabDescr where
mkInductiveView : Modifiers Syntax TermElabM InductiveElabStep1
@@ -1052,9 +1048,9 @@ private def elabInductiveViewsPostprocessing (views : Array InductiveView) (res
let ref := view0.ref
applyComputedFields views -- NOTE: any generated code before this line is invalid
liftTermElabM <| withMCtx res.mctx <| withLCtx res.lctx res.localInsts do
let finalizers res.elabs.mapM fun elab' => elab'.prefinalize res.levelParams res.params res.replaceIndFVars
for elab' in res.elabs do elab'.prefinalize res.levelParams res.params res.replaceIndFVars
for view in views do withRef view.declId <| Term.applyAttributesAt view.declName view.modifiers.attrs .afterTypeChecking
for elab' in finalizers do elab'.finalize
for elab' in res.elabs do elab'.finalize res.levelParams res.params res.replaceIndFVars
applyDerivingHandlers views
runTermElabM fun _ => Term.withDeclName view0.declName do withRef ref do
for view in views do withRef view.declId <| Term.applyAttributesAt view.declName view.modifiers.attrs .afterCompilation

View File

@@ -87,11 +87,11 @@ def applyAttributesOf (preDefs : Array PreDefinition) (applicationTime : Attribu
for preDef in preDefs do
applyAttributesAt preDef.declName preDef.modifiers.attrs applicationTime
def abstractNestedProofs (preDef : PreDefinition) (cache := true) : MetaM PreDefinition := withRef preDef.ref do
def abstractNestedProofs (preDef : PreDefinition) : MetaM PreDefinition := withRef preDef.ref do
if preDef.kind.isTheorem || preDef.kind.isExample then
pure preDef
else do
let value Meta.abstractNestedProofs (cache := cache) preDef.declName preDef.value
let value Meta.abstractNestedProofs preDef.declName preDef.value
pure { preDef with value := value }
/-- Auxiliary method for (temporarily) adding pre definition as an axiom -/
@@ -121,9 +121,9 @@ private def reportTheoremDiag (d : TheoremVal) : TermElabM Unit := do
-- let info
logInfo <| MessageData.trace { cls := `theorem } m!"{d.name}" (#[sizeMsg] ++ constOccsMsg)
private def addNonRecAux (preDef : PreDefinition) (compile : Bool) (all : List Name) (applyAttrAfterCompilation := true) (cacheProofs := true) : TermElabM Unit :=
private def addNonRecAux (preDef : PreDefinition) (compile : Bool) (all : List Name) (applyAttrAfterCompilation := true) : TermElabM Unit :=
withRef preDef.ref do
let preDef abstractNestedProofs (cache := cacheProofs) preDef
let preDef abstractNestedProofs preDef
let mkDefDecl : TermElabM Declaration :=
return Declaration.defnDecl {
name := preDef.declName, levelParams := preDef.levelParams, type := preDef.type, value := preDef.value
@@ -168,8 +168,8 @@ private def addNonRecAux (preDef : PreDefinition) (compile : Bool) (all : List N
def addAndCompileNonRec (preDef : PreDefinition) (all : List Name := [preDef.declName]) : TermElabM Unit := do
addNonRecAux preDef (compile := true) (all := all)
def addNonRec (preDef : PreDefinition) (applyAttrAfterCompilation := true) (all : List Name := [preDef.declName]) (cacheProofs := true) : TermElabM Unit := do
addNonRecAux preDef (compile := false) (applyAttrAfterCompilation := applyAttrAfterCompilation) (all := all) (cacheProofs := cacheProofs)
def addNonRec (preDef : PreDefinition) (applyAttrAfterCompilation := true) (all : List Name := [preDef.declName]) : TermElabM Unit := do
addNonRecAux preDef (compile := false) (applyAttrAfterCompilation := applyAttrAfterCompilation) (all := all)
/--
Eliminate recursive application annotations containing syntax. These annotations are used by the well-founded recursion module

View File

@@ -27,7 +27,7 @@ where
go (fvars.push x) (vals.map fun val => val.bindingBody!.instantiate1 x)
def addPreDefsFromUnary (preDefs : Array PreDefinition) (preDefsNonrec : Array PreDefinition)
(unaryPreDefNonRec : PreDefinition) (cacheProofs := true) : TermElabM Unit := do
(unaryPreDefNonRec : PreDefinition) : TermElabM Unit := do
/-
We must remove `implemented_by` attributes from the auxiliary application because
this attribute is only relevant for code that is compiled. Moreover, the `[implemented_by <decl>]`
@@ -41,21 +41,21 @@ def addPreDefsFromUnary (preDefs : Array PreDefinition) (preDefsNonrec : Array P
-- we recognize that below and then do not set @[irreducible]
withOptions (allowUnsafeReducibility.set · true) do
if unaryPreDefNonRec.declName = preDefs[0]!.declName then
addNonRec preDefNonRec (applyAttrAfterCompilation := false) (cacheProofs := cacheProofs)
addNonRec preDefNonRec (applyAttrAfterCompilation := false)
else
withEnableInfoTree false do
addNonRec preDefNonRec (applyAttrAfterCompilation := false) (cacheProofs := cacheProofs)
preDefsNonrec.forM (addNonRec · (applyAttrAfterCompilation := false) (all := declNames) (cacheProofs := cacheProofs))
addNonRec preDefNonRec (applyAttrAfterCompilation := false)
preDefsNonrec.forM (addNonRec · (applyAttrAfterCompilation := false) (all := declNames))
/--
Cleans the right-hand-sides of the predefinitions, to prepare for inclusion in the EqnInfos:
* Remove RecAppSyntax markers
* Abstracts nested proofs (and for that, add the `_unsafe_rec` definitions)
-/
def cleanPreDefs (preDefs : Array PreDefinition) (cacheProofs := true) : TermElabM (Array PreDefinition) := do
def cleanPreDefs (preDefs : Array PreDefinition) : TermElabM (Array PreDefinition) := do
addAndCompilePartialRec preDefs
let preDefs preDefs.mapM (eraseRecAppSyntax ·)
let preDefs preDefs.mapM (abstractNestedProofs (cache := cacheProofs) ·)
let preDefs preDefs.mapM (abstractNestedProofs ·)
return preDefs
/--

View File

@@ -66,8 +66,8 @@ def wfRecursion (preDefs : Array PreDefinition) (termMeasure?s : Array (Option T
trace[Elab.definition.wf] ">> {preDefNonRec.declName} :=\n{preDefNonRec.value}"
let preDefsNonrec preDefsFromUnaryNonRec fixedParamPerms argsPacker preDefs preDefNonRec
Mutual.addPreDefsFromUnary (cacheProofs := false) preDefs preDefsNonrec preDefNonRec
let preDefs Mutual.cleanPreDefs (cacheProofs := false) preDefs
Mutual.addPreDefsFromUnary preDefs preDefsNonrec preDefNonRec
let preDefs Mutual.cleanPreDefs preDefs
registerEqnsInfo preDefs preDefNonRec.declName fixedParamPerms argsPacker
for preDef in preDefs, wfPreprocessProof in wfPreprocessProofs do
unless preDef.kind.isTheorem do

View File

@@ -81,14 +81,13 @@ private partial def getFieldOrigin (structName field : Name) : MetaM StructureFi
return fi
open Meta in
private partial def printStructure (id : Name) (levelParams : List Name) (numParams : Nat) (type : Expr) (ctor : Name)
private partial def printStructure (id : Name) (levelParams : List Name) (numParams : Nat) (type : Expr)
(isUnsafe : Bool) : CommandElabM Unit := do
let env getEnv
let kind := if isClass env id then "class" else "structure"
let header mkHeader' kind id levelParams type isUnsafe (sig := false)
let levels := levelParams.map Level.param
liftTermElabM <| forallTelescope ( getConstInfo id).type fun params _ =>
let s := Expr.const id levels
let s := Expr.const id (levelParams.map .param)
withLocalDeclD `self (mkAppN s params) fun self => do
let mut m : MessageData := header
-- Signature
@@ -101,13 +100,15 @@ private partial def printStructure (id : Name) (levelParams : List Name) (numPar
unless parents.isEmpty do
m := m ++ Format.line ++ "parents:"
for parent in parents do
let ptype inferType (mkApp (mkAppN (.const parent.projFn levels) params) self)
let ptype inferType (mkApp (mkAppN (.const parent.projFn (levelParams.map .param)) params) self)
m := m ++ indentD m!"{.ofConstName parent.projFn (fullNames := true)} : {ptype}"
-- Fields
-- Collect params in a map for default value processing
let paramMap : NameMap Expr params.foldlM (init := {}) fun paramMap param => do
pure <| paramMap.insert ( param.fvarId!.getUserName) param
-- Collect autoParam tactics, which are all on the flat constructor:
let flatCtorName := mkFlatCtorOfStructCtorName ctor
let flatCtorInfo getConstInfo flatCtorName
let autoParams : NameMap Syntax forallTelescope flatCtorInfo.type fun args _ =>
let flatCtorName := mkFlatCtorOfStructName id
let autoParams : NameMap Syntax forallTelescope ( getConstInfo flatCtorName).type fun args _ =>
args[numParams:].foldlM (init := {}) fun set arg => do
let decl arg.fvarId!.getDecl
if let some (.const tacticDecl _) := decl.type.getAutoParamTactic? then
@@ -134,7 +135,9 @@ private partial def printStructure (id : Name) (levelParams : List Name) (numPar
let stx : TSyntax ``Parser.Tactic.tacticSeq := stx
pure m!" := by{indentD stx}"
else if let some defFn := getEffectiveDefaultFnForField? env id field then
if let some (_, val) instantiateStructDefaultValueFn? defFn levels params (pure fieldMap.find?) then
let cinfo getConstInfo defFn
let defValue instantiateValueLevelParams cinfo (levelParams.map .param)
if let some val processDefaultValue paramMap fieldMap defValue then
pure m!" :={indentExpr val}"
else
pure m!" := <error>"
@@ -153,6 +156,24 @@ private partial def printStructure (id : Name) (levelParams : List Name) (numPar
-- Omit proofs; the delaborator enables `pp.proofs` for non-constant proofs, but we don't want this for default values
withOptions (fun opts => opts.set pp.proofs.name false) do
logInfo m
where
processDefaultValue (paramMap : NameMap Expr) (fieldValues : NameMap Expr) : Expr MetaM (Option Expr)
| .lam n d b c => do
if c.isExplicit then
let some val := fieldValues.find? n | return none
if isDefEq ( inferType val) d then
processDefaultValue paramMap fieldValues (b.instantiate1 val)
else
return none
else
let some param := paramMap.find? n | return none
if isDefEq ( inferType param) d then
processDefaultValue paramMap fieldValues (b.instantiate1 param)
else
return none
| e =>
let_expr id _ a := e | return some e
return some a
private def printIdCore (id : Name) : CommandElabM Unit := do
let env getEnv
@@ -166,7 +187,7 @@ private def printIdCore (id : Name) : CommandElabM Unit := do
| ConstantInfo.recInfo { levelParams := us, type := t, isUnsafe := u, .. } => printAxiomLike "recursor" id us t u
| ConstantInfo.inductInfo { levelParams := us, numParams, type := t, ctors, isUnsafe := u, .. } =>
if isStructure env id then
printStructure id us numParams t ctors[0]! u
printStructure id us numParams t u
else
printInduct id us numParams t ctors u
| none => throwUnknownId id

File diff suppressed because it is too large Load Diff

View File

@@ -160,8 +160,6 @@ structure StructFieldInfo where
declName : Name
/-- Binder info to use when making the constructor. Only applies to those fields that will appear in the constructor. -/
binfo : BinderInfo
/-- Overrides for the parameters' binder infos when making the projections. The first component is a ref for the binder. -/
paramInfoOverrides : ExprMap (Syntax × BinderInfo) := {}
/--
Structure names that are responsible for this field being here.
- Empty if the field is a `newField`.
@@ -186,7 +184,7 @@ structure StructFieldInfo where
inheritedDefaults : Array (Name × StructFieldDefault) := #[]
/-- The default that will be used for this structure. -/
resolvedDefault? : Option StructFieldDefault := none
deriving Inhabited
deriving Inhabited, Repr
/-!
### View construction
@@ -512,6 +510,46 @@ private def reduceFieldProjs (e : Expr) (zetaDelta := true) : StructElabM Expr :
return TransformStep.continue
Meta.transform e (post := postVisit)
/-- Checks if the expression is of the form `S.mk x.1 ... x.n` with `n` nonzero
and `S.mk` a structure constructor with `S` one of the recorded structure parents.
Returns `x`.
Each projection `x.i` can be either a native projection or from a projection function. -/
private def etaStruct? (e : Expr) : StructElabM (Option Expr) := do
let .const f _ := e.getAppFn | return none
let some (ConstantInfo.ctorInfo fVal) := ( getEnv).find? f | return none
unless ( findParentFieldInfo? fVal.induct).isSome do return none
unless 0 < fVal.numFields && e.getAppNumArgs == fVal.numParams + fVal.numFields do return none
let args := e.getAppArgs
let some (S0, i0, x) getProjectedExpr args[fVal.numParams]! | return none
unless S0 == fVal.induct && i0 == 0 do return none
for i in [1 : fVal.numFields] do
let arg := args[fVal.numParams + i]!
let some (S', i', x') getProjectedExpr arg | return none
unless S' == fVal.induct && i' == i && x' == x do return none
return x
where
/-- Given an expression that's either a native projection or a registered projection
function, gives (1) the name of the structure type, (2) the index of the projection, and
(3) the object being projected. -/
getProjectedExpr (e : Expr) : MetaM (Option (Name × Nat × Expr)) := do
if let .proj S i x := e then
return (S, i, x)
if let .const fn _ := e.getAppFn then
if let some info getProjectionFnInfo? fn then
if e.getAppNumArgs == info.numParams + 1 then
if let some (ConstantInfo.ctorInfo fVal) := ( getEnv).find? info.ctorName then
return (fVal.induct, info.i, e.appArg!)
return none
/-- Runs `etaStruct?` over the whole expression. -/
private def etaStructReduce (e : Expr) : StructElabM Expr := do
let e instantiateMVars e
Meta.transform e (post := fun e => do
if let some e etaStruct? e then
return .done e
else
return .continue)
/--
Puts an expression into "field normal form".
- All projections of constructors for parent structures are reduced.
@@ -519,8 +557,7 @@ Puts an expression into "field normal form".
- Constructors of parent structures are eta reduced.
-/
private def fieldNormalizeExpr (e : Expr) (zetaDelta : Bool := true) : StructElabM Expr := do
let ancestors := ( get).ancestorFieldIdx
etaStructReduce (p := ancestors.contains) <| reduceFieldProjs e (zetaDelta := zetaDelta)
etaStructReduce <| reduceFieldProjs e (zetaDelta := zetaDelta)
private def fieldFromMsg (info : StructFieldInfo) : MessageData :=
if let some sourceStructName := info.sourceStructNames.head? then
@@ -529,25 +566,46 @@ private def fieldFromMsg (info : StructFieldInfo) : MessageData :=
m!"field '{info.name}'"
/--
Instantiates default value for field `fieldName` set at structure `structName`, using the field fvars in the `StructFieldInfo`s.
Instantiates default value for field `fieldName` set at structure `structName`.
The arguments for the `_default` auxiliary function are provided by `fieldMap`.
After default values are resolved, then the one that is added to the environment
as an `_inherited_default` auxiliary function is normalized;
we don't do those normalizations here, since that could be wasted effort if this default isn't chosen.
as an `_inherited_default` auxiliary function is normalized; we don't do those normalizations here.
-/
private partial def getFieldDefaultValue? (structName : Name) (params : Array Expr) (fieldName : Name) : StructElabM (Option Expr) := do
let some defFn := getDefaultFnForField? ( getEnv) structName fieldName
| return none
let fieldVal? (n : Name) : StructElabM (Option Expr) := do
let some info findFieldInfo? n | return none
return info.fvar
let some (_, val) instantiateStructDefaultValueFn? defFn none params fieldVal?
| logWarning m!"default value for field '{fieldName}' of structure '{.ofConstName structName}' could not be instantiated, ignoring"
return none
return val
private partial def getFieldDefaultValue? (structName : Name) (paramMap : NameMap Expr) (fieldName : Name) : StructElabM (Option Expr) := do
match getDefaultFnForField? ( getEnv) structName fieldName with
| none => return none
| some defaultFn =>
let cinfo getConstInfo defaultFn
let us mkFreshLevelMVarsFor cinfo
go? ( instantiateValueLevelParams cinfo us)
where
failed : MetaM (Option Expr) := do
logWarning m!"ignoring default value for field '{fieldName}' defined at '{.ofConstName structName}'"
return none
private def getFieldDefault? (structName : Name) (params : Array Expr) (fieldName : Name) :
go? (e : Expr) : StructElabM (Option Expr) := do
match e with
| Expr.lam n d b c =>
if c.isExplicit then
let some info findFieldInfo? n | failed
let valType inferType info.fvar
if ( isDefEq valType d) then
go? (b.instantiate1 info.fvar)
else
failed
else
let some param := paramMap.find? n | return none
if isDefEq ( inferType param) d then
go? (b.instantiate1 param)
else
failed
| e =>
let r := if e.isAppOfArity ``id 2 then e.appArg! else e
return some ( reduceFieldProjs r)
private def getFieldDefault? (structName : Name) (paramMap : NameMap Expr) (fieldName : Name) :
StructElabM (Option StructFieldDefault) := do
if let some val getFieldDefaultValue? structName params fieldName then
if let some val getFieldDefaultValue? structName (paramMap : NameMap Expr) fieldName then
-- Important: we use `getFieldDefaultValue?` because we want default value definitions, not *inherited* ones, to properly handle diamonds
trace[Elab.structure] "found default value for '{fieldName}' from '{.ofConstName structName}'{indentExpr val}"
return StructFieldDefault.optParam val
@@ -572,7 +630,7 @@ Adds `fieldName` of type `fieldType` from structure `structName`.
See `withStructFields` for meanings of other arguments.
-/
private partial def withStructField (view : StructView) (sourceStructNames : List Name) (inSubobject? : Option Expr)
(structName : Name) (params : Array Expr) (fieldName : Name) (fieldType : Expr)
(structName : Name) (paramMap : NameMap Expr) (fieldName : Name) (fieldType : Expr)
(k : Expr StructElabM α) : StructElabM α := do
trace[Elab.structure] "withStructField '{.ofConstName structName}', field '{fieldName}'"
let fieldType instantiateMVars fieldType
@@ -591,7 +649,7 @@ private partial def withStructField (view : StructView) (sourceStructNames : Lis
let existingFieldType inferType existingField.fvar
unless ( isDefEq fieldType existingFieldType) do
throwError "field type mismatch, field '{fieldName}' from parent '{.ofConstName structName}' {← mkHasTypeButIsExpectedMsg fieldType existingFieldType}"
if let some d getFieldDefault? structName params fieldName then
if let some d getFieldDefault? structName paramMap fieldName then
addFieldInheritedDefault fieldName structName d
k existingField.fvar
else
@@ -603,7 +661,6 @@ private partial def withStructField (view : StructView) (sourceStructNames : Lis
declName applyVisibility ( toVisibility fieldInfo) declName
-- No need to validate links because this docstring was already added to the environment previously
addDocStringCore' declName ( findDocString? ( getEnv) fieldInfo.projFn)
addDeclarationRangesFromSyntax declName ( getRef)
checkNotAlreadyDeclared declName
withLocalDecl fieldName fieldInfo.binderInfo ( reduceFieldProjs fieldType) fun fieldFVar => do
let projExpr? inSubobject?.mapM fun subobject => mkProjection subobject fieldName
@@ -618,7 +675,7 @@ private partial def withStructField (view : StructView) (sourceStructNames : Lis
binfo := fieldInfo.binderInfo
projFn? := fieldInfo.projFn
}
if let some d getFieldDefault? structName params fieldName then
if let some d getFieldDefault? structName paramMap fieldName then
addFieldInheritedDefault fieldName structName d
k fieldFVar
@@ -631,7 +688,7 @@ Does not add a parent field for the structure itself; that is done by `withStruc
- the continuation `k` is run with a constructor expression for this structure
-/
private partial def withStructFields (view : StructView) (sourceStructNames : List Name)
(structType : Expr) (inSubobject? : Option Expr)
(structType : Expr) (inSubobject? : Option Expr) (paramMap : NameMap Expr)
(k : Expr StructElabM α) : StructElabM α := do
let structName getStructureName structType
let .const _ us := structType.getAppFn | unreachable!
@@ -667,7 +724,7 @@ private partial def withStructFields (view : StructView) (sourceStructNames : Li
let fieldName := fields[i]
let fieldMVar := fieldMVars[i]!
let fieldType inferType fieldMVar
withStructField view sourceStructNames inSubobject? structName params fieldName fieldType fun fieldFVar => do
withStructField view sourceStructNames inSubobject? structName paramMap fieldName fieldType fun fieldFVar => do
fieldMVar.mvarId!.assign fieldFVar
goFields (i + 1)
else
@@ -718,7 +775,14 @@ private partial def withStruct (view : StructView) (sourceStructNames : List Nam
let allFields := getStructureFieldsFlattened env structName (includeSubobjectFields := false)
let withStructFields' (kind : StructFieldKind) (inSubobject? : Option Expr) (k : StructFieldInfo StructElabM α) : StructElabM α := do
withStructFields view sourceStructNames structType inSubobject? fun structVal => do
-- Create a parameter map for default value processing
let info getConstInfoInduct structName
let paramMap : NameMap Expr forallTelescope info.type fun xs _ => do
let mut paramMap := {}
for param in params, x in xs do
paramMap := paramMap.insert ( x.fvarId!.getUserName) param
return paramMap
withStructFields view sourceStructNames structType inSubobject? paramMap fun structVal => do
if let some _ findFieldInfo? structFieldName then
throwErrorAt projRef "field '{structFieldName}' has already been declared\n\n\
The 'toParent : P' syntax can be used to adjust the name for the parent projection"
@@ -727,7 +791,7 @@ private partial def withStruct (view : StructView) (sourceStructNames : List Nam
-- which for inherited fields might not have been seen yet.
-- Note: duplication is ok for now. We use a stable sort later.
for fieldName in allFields do
if let some d getFieldDefault? structName params fieldName then
if let some d getFieldDefault? structName paramMap fieldName then
addFieldInheritedDefault fieldName structName d
withLetDecl rawStructFieldName structType structVal fun structFVar => do
let info : StructFieldInfo := {
@@ -897,58 +961,23 @@ private def solveParentMVars (e : Expr) : StructElabM Expr := do
discard <| MVarId.checkedAssign mvar parentInfo.fvar
return e
open Parser.Term in
private def typelessBinder? : Syntax Option ((Array Ident) × BinderInfo)
| `(bracketedBinderF|($ids:ident*)) => some (ids, .default)
| `(bracketedBinderF|{$ids:ident*}) => some (ids, .implicit)
| `(bracketedBinderF|$ids:ident*) => some (ids, .strictImplicit)
| `(bracketedBinderF|[$id:ident]) => some (#[id], .instImplicit)
| _ => none
/--
Takes a binder list and interprets the prefix to see if any could be construed to be binder info updates.
Returns the binder list without these updates along with the new binder infos for these parameters.
-/
private def elabParamInfoUpdates (structParams : Array Expr) (binders : Array Syntax) : StructElabM (Array Syntax × ExprMap (Syntax × BinderInfo)) := do
let mut overrides : ExprMap (Syntax × BinderInfo) := {}
for i in [0:binders.size] do
match typelessBinder? binders[i]! with
| none => return (binders.extract i, overrides)
| some (ids, bi) =>
let lctx getLCtx
let decls := ids.filterMap fun id => lctx.findFromUserName? id.getId
-- Filter out all fields. We assume the remaining fvars are the possible parameters.
let decls decls.filterM fun decl => return ( findFieldInfoByFVarId? decl.fvarId).isNone
if decls.size != ids.size then
-- Then either these are for a new variables or the binder isn't only for parameters
return (binders.extract i, overrides)
for decl in decls, id in ids do
Term.addTermInfo' id decl.toExpr
unless structParams.contains decl.toExpr do
throwErrorAt id m!"only parameters appearing in the declaration header may have their binders kinds be overridden\n\n\
If this is not intended to be an override, use a binder with a type, for example '(x : _)'."
overrides := overrides.insert decl.toExpr (id, bi)
return (#[], overrides)
private def elabFieldTypeValue (structParams : Array Expr) (view : StructFieldView) :
StructElabM (Option Expr × ExprMap (Syntax × BinderInfo) × Option StructFieldDefault) := do
private def elabFieldTypeValue (view : StructFieldView) : StructElabM (Option Expr × Option StructFieldDefault) := do
let state get
let binders := view.binders.getArgs
let (binders, paramInfoOverrides) elabParamInfoUpdates structParams binders
Term.withAutoBoundImplicit <| Term.withAutoBoundImplicitForbiddenPred (fun n => view.name == n) <| Term.elabBinders binders fun params => do
Term.withAutoBoundImplicit <| Term.withAutoBoundImplicitForbiddenPred (fun n => view.name == n) <| Term.elabBinders view.binders.getArgs fun params => do
match view.type? with
| none =>
| none =>
match view.default? with
| none => return (none, paramInfoOverrides, none)
| none => return (none, none)
| some (.optParam valStx) =>
Term.synthesizeSyntheticMVarsNoPostponing
-- TODO: add forbidden predicate using `shortDeclName` from `view`
let params Term.addAutoBoundImplicits params (view.nameId.getTailPos? (canonicalOnly := true))
let value Term.withoutAutoBoundImplicit <| Term.elabTerm valStx none
let value runStructElabM (init := state) <| solveParentMVars value
registerFailedToInferFieldType view.name ( inferType value) view.nameId
registerFailedToInferDefaultValue view.name value valStx
let value mkLambdaFVars params value
return (none, paramInfoOverrides, StructFieldDefault.optParam value)
return (none, StructFieldDefault.optParam value)
| some (.autoParam tacticStx) =>
throwErrorAt tacticStx "invalid field declaration, type must be provided when auto-param tactic is used"
| some typeStx =>
@@ -958,9 +987,9 @@ private def elabFieldTypeValue (structParams : Array Expr) (view : StructFieldVi
Term.synthesizeSyntheticMVarsNoPostponing
let params Term.addAutoBoundImplicits params (view.nameId.getTailPos? (canonicalOnly := true))
match view.default? with
| none =>
| none =>
let type mkForallFVars params type
return (type, paramInfoOverrides, none)
return (type, none)
| some (.optParam valStx) =>
let value Term.withoutAutoBoundImplicit <| Term.elabTermEnsuringType valStx type
let value runStructElabM (init := state) <| solveParentMVars value
@@ -968,14 +997,14 @@ private def elabFieldTypeValue (structParams : Array Expr) (view : StructFieldVi
Term.synthesizeSyntheticMVarsNoPostponing
let type mkForallFVars params type
let value mkLambdaFVars params value
return (type, paramInfoOverrides, StructFieldDefault.optParam value)
return (type, StructFieldDefault.optParam value)
| some (.autoParam tacticStx) =>
let name := mkAutoParamFnOfProjFn view.declName
discard <| Term.declareTacticSyntax tacticStx name
let type mkForallFVars params type
return (type, paramInfoOverrides, StructFieldDefault.autoParam <| .const name [])
return (type, StructFieldDefault.autoParam <| .const name [])
private partial def withFields (structParams : Array Expr) (views : Array StructFieldView) (k : StructElabM α) : StructElabM α := do
private partial def withFields (views : Array StructFieldView) (k : StructElabM α) : StructElabM α := do
go 0
where
go (i : Nat) : StructElabM α := do
@@ -986,14 +1015,14 @@ where
throwError "field '{view.name}' has already been declared as a projection for parent '{.ofConstName parent.structName}'"
match findFieldInfo? view.name with
| none =>
let (type?, paramInfoOverrides, default?) elabFieldTypeValue structParams view
let (type?, default?) elabFieldTypeValue view
match type?, default? with
| none, none => throwError "invalid field, type expected"
| some type, _ =>
withLocalDecl view.rawName view.binderInfo type fun fieldFVar => do
addFieldInfo { ref := view.nameId, sourceStructNames := [],
name := view.name, declName := view.declName, fvar := fieldFVar, default? := default?,
binfo := view.binderInfo, paramInfoOverrides,
binfo := view.binderInfo,
kind := StructFieldKind.newField }
go (i+1)
| none, some (.optParam value) =>
@@ -1001,7 +1030,7 @@ where
withLocalDecl view.rawName view.binderInfo type fun fieldFVar => do
addFieldInfo { ref := view.nameId, sourceStructNames := [],
name := view.name, declName := view.declName, fvar := fieldFVar, default? := default?,
binfo := view.binderInfo, paramInfoOverrides,
binfo := view.binderInfo,
kind := StructFieldKind.newField }
go (i+1)
| none, some (.autoParam _) =>
@@ -1017,12 +1046,8 @@ where
if info.default?.isSome then
throwError "field '{view.name}' new default value has already been set"
let mut valStx := valStx
let (binders, paramInfoOverrides) elabParamInfoUpdates structParams view.binders.getArgs
unless paramInfoOverrides.isEmpty do
let params := MessageData.joinSep (paramInfoOverrides.toList.map (m!"{·.1}")) ", "
throwError "cannot override structure parameter binder kinds when overriding the default value: {params}"
if binders.size > 0 then
valStx `(fun $binders* => $valStx:term)
if view.binders.getArgs.size > 0 then
valStx `(fun $(view.binders.getArgs)* => $valStx:term)
let fvarType inferType info.fvar
let value Term.elabTermEnsuringType valStx fvarType
registerFailedToInferDefaultValue view.name value valStx
@@ -1125,9 +1150,11 @@ Assumes the inductive type has already been added to the environment.
Note: we can't generally use optParams here since the default values might depend on previous ones.
We include autoParams however.
-/
private def mkFlatCtorExpr (levelParams : List Name) (params : Array Expr) (ctor : ConstructorVal) (replaceIndFVars : Expr MetaM Expr) :
private def mkFlatCtorExpr (levelParams : List Name) (params : Array Expr) (structName : Name) (replaceIndFVars : Expr MetaM Expr) :
StructElabM Expr := do
let env getEnv
-- build the constructor application using the fields in the local context
let ctor := getStructureCtor env structName
let mut val := mkAppN (mkConst ctor.name (levelParams.map mkLevelParam)) params
let fieldInfos := ( get).fields
for fieldInfo in fieldInfos do
@@ -1146,20 +1173,17 @@ private def mkFlatCtorExpr (levelParams : List Name) (params : Array Expr) (ctor
| _ => pure decl.type
let type zetaDeltaFVars ( instantiateMVars type) parentFVars
let type replaceIndFVars type
return .lam decl.userName.eraseMacroScopes type (val.abstract #[fieldInfo.fvar]) decl.binderInfo
return .lam decl.userName type (val.abstract #[fieldInfo.fvar]) decl.binderInfo
val mkLambdaFVars params val
val replaceIndFVars val
fieldNormalizeExpr val
private partial def mkFlatCtor (levelParams : List Name) (params : Array Expr) (structName : Name) (replaceIndFVars : Expr MetaM Expr) :
StructElabM Unit := do
let env getEnv
let ctor := getStructureCtor env structName
let val mkFlatCtorExpr levelParams params ctor replaceIndFVars
let val mkFlatCtorExpr levelParams params structName replaceIndFVars
withLCtx {} {} do trace[Elab.structure] "created flat constructor:{indentExpr val}"
unless val.hasSyntheticSorry do
-- Note: flatCtorName will be private if the constructor is private
let flatCtorName := mkFlatCtorOfStructCtorName ctor.name
let flatCtorName := mkFlatCtorOfStructName structName
let valType replaceIndFVars ( instantiateMVars ( inferType val))
let valType := valType.inferImplicit params.size true
addDecl <| Declaration.defnDecl ( mkDefinitionValInferrringUnsafe flatCtorName levelParams valType val .abbrev)
@@ -1174,16 +1198,11 @@ private partial def checkResultingUniversesForFields (fieldInfos : Array StructF
which is not less than or equal to the structure's resulting universe level{indentD u}"
throwErrorAt info.ref msg
private def addProjections (params : Array Expr) (r : ElabHeaderResult) (fieldInfos : Array StructFieldInfo) : TermElabM Unit := do
let projDecls : Array StructProjDecl
private def addProjections (r : ElabHeaderResult) (fieldInfos : Array StructFieldInfo) : TermElabM Unit := do
let projDecls : Array StructProjDecl :=
fieldInfos
|>.filter (·.kind.isInCtor)
|>.mapM (fun info => do
info.paramInfoOverrides.forM fun p (ref, _) => do
unless params.contains p do
throwErrorAt ref "invalid parameter binder update, not a parameter"
let paramInfoOverrides := params |>.map (fun param => info.paramInfoOverrides[param]?.map Prod.snd) |>.toList
return { ref := info.ref, projName := info.declName, paramInfoOverrides })
|>.map (fun info => { ref := info.ref, projName := info.declName })
mkProjections r.view.declName projDecls r.view.isClass
for fieldInfo in fieldInfos do
if fieldInfo.kind.isSubobject then
@@ -1199,8 +1218,8 @@ private def registerStructure (structName : Name) (infos : Array StructFieldInfo
fieldName := info.name
projFn := info.declName
binderInfo := info.binfo
autoParam? := if let some (.autoParam tactic) := info.resolvedDefault? then some tactic else none
subobject? := if let .subobject parentName := info.kind then parentName else none
autoParam? := none -- deprecated field
}
else
return none
@@ -1374,8 +1393,8 @@ private def mkRemainingProjections (levelParams : List Name) (params : Array Exp
-- No need to zeta delta reduce; `fvarToConst` has replaced such fvars.
let val fieldNormalizeExpr val (zetaDelta := false)
fvarToConst := fvarToConst.insert field.fvar val
-- TODO(kmill): if it is a direct parent, try adding the coercion function from the environment and use that instead of `val`.
-- (This should be evaluated to see if it is a good idea.)
-- TODO(kmill): if it is a direct parent, add the coercion function the environment and use that instead of `val`,
-- and evaluate the difference.
else
throwError m!"(mkRemainingProjections internal error) {field.name} has no value"
@@ -1431,7 +1450,7 @@ def elabStructureCommand : InductiveElabDescr where
view := view.toInductiveView
elabCtors := fun rs r params => runStructElabM do
withParents view rs r.indFVar do
withFields params view.fields do
withFields view.fields do
withRef view.ref do
Term.synthesizeSyntheticMVarsNoPostponing
resolveFieldDefaults view.declName
@@ -1446,31 +1465,30 @@ def elabStructureCommand : InductiveElabDescr where
collectUsedFVars := collectUsedFVars lctx localInsts fieldInfos
checkUniverses := fun _ u => withLCtx lctx localInsts do checkResultingUniversesForFields fieldInfos u
finalizeTermElab := withLCtx lctx localInsts do checkDefaults fieldInfos
prefinalize := fun levelParams params replaceIndFVars => do
prefinalize := fun _ _ _ => do
withLCtx lctx localInsts do
addProjections params r fieldInfos
addProjections r fieldInfos
registerStructure view.declName fieldInfos
runStructElabM (init := state) do
mkFlatCtor levelParams params view.declName replaceIndFVars
addDefaults levelParams params replaceIndFVars
let parentInfos withLCtx lctx localInsts <| runStructElabM (init := state) do
mkRemainingProjections levelParams params view
setStructureParents view.declName parentInfos
withSaveInfoContext do -- save new env
for field in view.fields do
-- may not exist if overriding inherited field
if ( getEnv).contains field.declName then
Term.addTermInfo' field.ref ( mkConstWithLevelParams field.declName) (isBinder := true)
finalize := fun levelParams params replaceIndFVars => do
let parentInfos runStructElabM (init := state) <| withLCtx lctx localInsts <| mkRemainingProjections levelParams params view
withSaveInfoContext do
-- Add terminfo for parents now that all parent projections exist.
for parent in parents do
if parent.addTermInfo then
Term.addTermInfo' parent.ref ( mkConstWithLevelParams parent.declName) (isBinder := true)
setStructureParents view.declName parentInfos
checkResolutionOrder view.declName
return {
finalize := do
if view.isClass then
addParentInstances parentInfos
}
if view.isClass then
addParentInstances parentInfos
runStructElabM (init := state) <| withLCtx lctx localInsts do
mkFlatCtor levelParams params view.declName replaceIndFVars
addDefaults levelParams params replaceIndFVars
}
}

View File

@@ -282,7 +282,7 @@ private def throwStuckAtUniverseCnstr : TermElabM Unit := do
of getting a mysterious type mismatch constraint, we get a list of
universe constraints the system is stuck at.
-/
private def processPostponedUniverseConstraints : TermElabM Unit := do
private def processPostponedUniverseContraints : TermElabM Unit := do
unless ( processPostponed (mayPostpone := false) (exceptionOnFailure := true)) do
throwStuckAtUniverseCnstr
@@ -485,7 +485,7 @@ mutual
reportStuckSyntheticMVars ignoreStuckTC
loop ()
if postpone == .no then
processPostponedUniverseConstraints
processPostponedUniverseContraints
end
def synthesizeSyntheticMVarsNoPostponing (ignoreStuckTC := false) : TermElabM Unit :=

View File

@@ -20,6 +20,8 @@ def elabAsAuxLemma : Lean.Elab.Tactic.Tactic
unless mvars.isEmpty do
throwError "Cannot abstract term into auxiliary lemma because there are open goals."
let e instantiateMVars (mkMVar mvarId)
let e mkAuxTheorem (prefix? := ( Term.getDeclName?)) ( mvarId.getType) e
let env getEnv
-- TODO: this likely should share name creation code with `mkAuxLemma`
let e mkAuxTheorem ( mkFreshUserName <| env.asyncPrefix?.getD env.mainModule ++ `_auxLemma) ( mvarId.getType) e
mvarId.assign e
| _ => throwError "Invalid as_aux_lemma syntax"

View File

@@ -35,15 +35,15 @@ def mkContext (lratPath : System.FilePath) (cfg : BVDecideConfig) : TermElabM Ta
/--
Prepare an `Expr` that proves `bvExpr.unsat` using `ofReduceBool`.
-/
def lratChecker (ctx : TacticContext) (reflectionResult : ReflectionResult) : MetaM Expr := do
def lratChecker (ctx : TacticContext) (bvExpr : BVLogicalExpr) : MetaM Expr := do
let cert LratCert.ofFile ctx.lratPath ctx.config.trimProofs
cert.toReflectionProof ctx reflectionResult
cert.toReflectionProof ctx bvExpr ``verifyBVExpr ``unsat_of_verifyBVExpr_eq_true
@[inherit_doc Lean.Parser.Tactic.bvCheck]
def bvCheck (g : MVarId) (ctx : TacticContext) : MetaM Unit := do
let unsatProver : UnsatProver := fun _ reflectionResult _ => do
withTraceNode `Meta.Tactic.sat (fun _ => return "Preparing LRAT reflection term") do
let proof lratChecker ctx reflectionResult
let proof lratChecker ctx reflectionResult.bvExpr
return .ok proof, ""
let _ closeWithBVReflection g unsatProver
return ()

View File

@@ -79,22 +79,9 @@ def reconstructCounterExample (var2Cnf : Std.HashMap BVBit Nat) (assignment : Ar
return finalMap
structure ReflectionResult where
/--
The reflected expression.
-/
bvExpr : BVLogicalExpr
/--
Function to prove `False` given a satisfiability proof of `bvExpr`
-/
proveFalse : Expr M Expr
/--
Set of unused hypotheses for diagnostic purposes.
-/
unusedHypotheses : Std.HashSet FVarId
/--
A cache for `toExpr bvExpr`.
-/
expr : Expr
/--
A counter example generated from the bitblaster.
@@ -270,54 +257,6 @@ def explainCounterExampleQuality (counterExample : CounterExample) : MetaM Messa
err := diagnosis.derivedEquations.foldl (init := err) folder
return err
/--
Turn an `LratCert` into a proof that some `reflectedExpr` is UNSAT.
-/
def LratCert.toReflectionProof (cert : LratCert) (cfg : TacticContext)
(reflectionResult : ReflectionResult) : MetaM Expr := do
withTraceNode `Meta.Tactic.sat (fun _ => return "Compiling expr term") do
mkAuxDecl cfg.exprDef reflectionResult.expr (mkConst ``BVLogicalExpr)
withTraceNode `Meta.Tactic.sat (fun _ => return "Compiling proof certificate term") do
mkAuxDecl cfg.certDef (toExpr cert) (mkConst ``String)
let reflectedExpr := mkConst cfg.exprDef
let certExpr := mkConst cfg.certDef
withTraceNode `Meta.Tactic.sat (fun _ => return "Compiling reflection proof term") do
let auxValue := mkApp2 (mkConst ``verifyBVExpr) reflectedExpr certExpr
mkAuxDecl cfg.reflectionDef auxValue (mkConst ``Bool)
let auxType mkEq (mkConst cfg.reflectionDef) (toExpr true)
let auxProof :=
mkApp3
(mkConst ``Lean.ofReduceBool)
(mkConst cfg.reflectionDef)
(toExpr true)
( mkEqRefl (toExpr true))
try
let auxLemma
-- disable async TC so we can catch its exceptions
withOptions (Elab.async.set · false) do
withTraceNode `Meta.Tactic.sat (fun _ => return "Verifying LRAT certificate") do
mkAuxLemma [] auxType auxProof
return mkApp3 (mkConst ``unsat_of_verifyBVExpr_eq_true) reflectedExpr certExpr (mkConst auxLemma)
catch e =>
throwError m!"Failed to check the LRAT certificate in the kernel:\n{e.toMessageData}"
where
/--
Add an auxiliary declaration. Only used to create constants that appear in our reflection proof.
-/
mkAuxDecl (name : Name) (value type : Expr) : CoreM Unit :=
addAndCompile <| .defnDecl {
name := name,
levelParams := [],
type := type,
value := value,
hints := .abbrev,
safety := .safe
}
def lratBitblaster (goal : MVarId) (ctx : TacticContext) (reflectionResult : ReflectionResult)
(atomsAssignment : Std.HashMap Nat (Nat × Expr × Bool)) :
MetaM (Except CounterExample UnsatProver.Result) := do
@@ -348,13 +287,14 @@ def lratBitblaster (goal : MVarId) (ctx : TacticContext) (reflectionResult : Ref
match res with
| .ok cert =>
trace[Meta.Tactic.sat] "SAT solver found a proof."
let proof cert.toReflectionProof ctx reflectionResult
let proof cert.toReflectionProof ctx bvExpr ``verifyBVExpr ``unsat_of_verifyBVExpr_eq_true
return .ok proof, cert
| .error assignment =>
trace[Meta.Tactic.sat] "SAT solver found a counter example."
let equations := reconstructCounterExample map assignment aigSize atomsAssignment
return .error { goal, unusedHypotheses := reflectionResult.unusedHypotheses, equations }
def reflectBV (g : MVarId) : M ReflectionResult := g.withContext do
let hyps getPropHyps
let mut sats := #[]
@@ -374,12 +314,12 @@ def reflectBV (g : MVarId) : M ReflectionResult := g.withContext do
else
let sat := sats[1:].foldl (init := sats[0]) SatAtBVLogical.and
return {
bvExpr := ShareCommon.shareCommon sat.bvExpr,
bvExpr := sat.bvExpr,
proveFalse := sat.proveFalse,
unusedHypotheses := unusedHypotheses,
expr := sat.expr
unusedHypotheses := unusedHypotheses
}
def closeWithBVReflection (g : MVarId) (unsatProver : UnsatProver) :
MetaM (Except CounterExample LratCert) := M.run do
g.withContext do

View File

@@ -142,11 +142,6 @@ structure State where
contained in `atoms`.
-/
atomsAssignmentCache : Option Expr := none
/--
Cached calls to `evalsAtAtoms` of various reflection structures. Whenever `atoms` is modified
this cache is invalidated as `evalsAtAtoms` relies on `atoms`.
-/
evalsAtCache : Std.HashMap Expr (Option Expr) := {}
/--
The reflection monad, used to track `BitVec` variables that we see as we traverse the context.
@@ -163,26 +158,14 @@ structure ReifiedBVExpr where
-/
bvExpr : BVExpr width
/--
The expression that was reflected, used for caching of `evalsAtAtoms`.
A proof that `bvExpr.eval atomsAssignment = originalBVExpr`, none if it holds by `rfl`.
-/
originalExpr : Expr
/--
A proof that `bvExpr.eval atomsAssignment = originalExpr`, none if it holds by `rfl`.
-/
evalsAtAtoms' : M (Option Expr)
evalsAtAtoms : M (Option Expr)
/--
A cache for `toExpr bvExpr`.
-/
expr : Expr
def ReifiedBVExpr.evalsAtAtoms (reified : ReifiedBVExpr) : M (Option Expr) := do
match ( get).evalsAtCache[reified.originalExpr]? with
| some hit => return hit
| none =>
let proof? reified.evalsAtAtoms'
modify fun s => { s with evalsAtCache := s.evalsAtCache.insert reified.originalExpr proof? }
return proof?
/--
A reified version of an `Expr` representing a `BVPred`.
-/
@@ -192,26 +175,14 @@ structure ReifiedBVPred where
-/
bvPred : BVPred
/--
The expression that was reflected, usef for caching of `evalsAtAtoms`.
A proof that `bvPred.eval atomsAssignment = originalBVPredExpr`, none if it holds by `rfl`.
-/
originalExpr : Expr
/--
A proof that `bvPred.eval atomsAssignment = originalExpr`, none if it holds by `rfl`.
-/
evalsAtAtoms' : M (Option Expr)
evalsAtAtoms : M (Option Expr)
/--
A cache for `toExpr bvPred`
-/
expr : Expr
def ReifiedBVPred.evalsAtAtoms (reified : ReifiedBVPred) : M (Option Expr) := do
match ( get).evalsAtCache[reified.originalExpr]? with
| some hit => return hit
| none =>
let proof? reified.evalsAtAtoms'
modify fun s => { s with evalsAtCache := s.evalsAtCache.insert reified.originalExpr proof? }
return proof?
/--
A reified version of an `Expr` representing a `BVLogicalExpr`.
-/
@@ -221,26 +192,14 @@ structure ReifiedBVLogical where
-/
bvExpr : BVLogicalExpr
/--
The expression that was reflected, usef for caching of `evalsAtAtoms`.
A proof that `bvExpr.eval atomsAssignment = originalBVLogicalExpr`, none if it holds by `rfl`.
-/
originalExpr : Expr
/--
A proof that `bvExpr.eval atomsAssignment = originalExpr`, none if it holds by `rfl`.
-/
evalsAtAtoms' : M (Option Expr)
evalsAtAtoms : M (Option Expr)
/--
A cache for `toExpr bvExpr`
-/
expr : Expr
def ReifiedBVLogical.evalsAtAtoms (reified : ReifiedBVLogical) : M (Option Expr) := do
match ( get).evalsAtCache[reified.originalExpr]? with
| some hit => return hit
| none =>
let proof? reified.evalsAtAtoms'
modify fun s => { s with evalsAtCache := s.evalsAtCache.insert reified.originalExpr proof? }
return proof?
/--
A reified version of an `Expr` representing a `BVLogicalExpr` that we know to be true.
-/
@@ -307,15 +266,7 @@ def lookup (e : Expr) (width : Nat) (synthetic : Bool) : M Nat := do
trace[Meta.Tactic.bv] "New atom of width {width}, synthetic? {synthetic}: {e}"
let ident modifyGetThe State fun s =>
let newAtom := { width, synthetic, atomNumber := s.atoms.size}
let newAtomNumber := s.atoms.size
let s := {
s with
atoms := s.atoms.insert e newAtom,
-- must clear the caches as they depend on `atoms`.
atomsAssignmentCache := none
evalsAtCache := {}
}
(newAtomNumber, s)
(s.atoms.size, { s with atoms := s.atoms.insert e newAtom, atomsAssignmentCache := none })
return ident
@[specialize]

Some files were not shown because too many files have changed in this diff Show More