Compare commits

..

1 Commits

Author SHA1 Message Date
Leonardo de Moura
96dd4e84c1 feat: support for ground terms at DiscrTree.lean
WIP for discussing with @joehendrix
2023-10-15 19:14:07 -07:00
833 changed files with 2959 additions and 7724 deletions

View File

@@ -33,7 +33,7 @@ assignees: ''
### Versions
[Output of `#eval Lean.versionString` or of `lean --version` in the folder that the issue occured in]
[Output of `lean --version` in the folder that the issue occured in]
[OS version]
### Additional Information

View File

@@ -1,13 +1,14 @@
# Read and remove this section before submitting
* [ ] Put an X in this bracket to confirm you have read the
[External Contribution Guidelines](https://github.com/leanprover/lean4/blob/master/doc/contributions.md).
* Ensure your PR follows the [External Contribution Guidelines](https://github.com/leanprover/lean4/blob/master/CONTRIBUTING.md).
* Please make sure the PR has excellent documentation and tests. If we label it `missing documentation` or `missing tests` then it needs fixing!
* Add the link to your `RFC` or `bug` issue below.
* If the issue does not already have approval from a developer, submit the PR as draft.
* Remove this section before submitting.
* Please put the link to your `RFC` or `bug` issue here.
PRs missing this link will be marked as `missing RFC`.
You can manage the `awaiting-review`, `awaiting-author`, and `WIP` labels yourself, by writing a comment containing one of these labels on its own line.
* If that issue does not already have approval from a developer,
please be sure to open this PR in "Draft" mode.
# Summary
* Please make sure the PR has excellent documentation and tests.
If we label it `missing documentation` or `missing tests` then it needs fixing!
Link to `RFC` or `bug` issue:
* You can manage the `awaiting-review`, `awaiting-author`, and `WIP` labels
yourself, by writing a comment containing one of these labels on its own line.

View File

@@ -6,7 +6,8 @@ on:
tags:
- '*'
pull_request:
merge_group:
branches:
- master
schedule:
- cron: '0 7 * * *' # 8AM CET/11PM PT
@@ -15,185 +16,18 @@ concurrency:
cancel-in-progress: true
jobs:
# This job determines various settings for the following CI runs; see the `outputs` for details
configure:
set-nightly:
runs-on: ubuntu-latest
outputs:
# Should we run only a quick CI? Yes on a pull request without the full-ci label
quick: ${{ steps.set-quick.outputs.quick }}
# The build matrix, dynamically generated here
matrix: ${{ steps.set-matrix.outputs.result }}
# Should we make a nightly release? If so, this output contains the lean version string, else it is empty
nightly: ${{ steps.set-nightly.outputs.nightly }}
# Should this be the CI for a tagged release?
# Yes only if a tag is pushed to the `leanprover` repository, and the tag is "v" followed by a valid semver.
# It sets `set-release.outputs.RELEASE_TAG` to the tag
# and sets `set-release.outputs.{LEAN_VERSION_MAJOR,LEAN_VERSION_MINOR,LEAN_VERSION_PATCH,LEAN_SPECIAL_VERSION_DESC}`
# to the semver components parsed via regex.
LEAN_VERSION_MAJOR: ${{ steps.set-release.outputs.LEAN_VERSION_MAJOR }}
LEAN_VERSION_MINOR: ${{ steps.set-release.outputs.LEAN_VERSION_MINOR }}
LEAN_VERSION_PATCH: ${{ steps.set-release.outputs.LEAN_VERSION_PATCH }}
LEAN_SPECIAL_VERSION_DESC: ${{ steps.set-release.outputs.LEAN_SPECIAL_VERSION_DESC }}
RELEASE_TAG: ${{ steps.set-release.outputs.RELEASE_TAG }}
nightly: ${{ steps.set.outputs.nightly }}
steps:
- name: Run quick CI?
id: set-quick
env:
quick: ${{
github.event_name == 'pull_request' && !contains( github.event.pull_request.labels.*.name, 'full-ci')
}}
run: |
echo "quick=${{env.quick}}" >> $GITHUB_OUTPUT
- name: Configure build matrix
id: set-matrix
uses: actions/github-script@v3
with:
script: |
const quick = ${{ steps.set-quick.outputs.quick }};
console.log(`quick: ${quick}`)
let matrix = [
{
// portable release build: use channel with older glibc (2.27)
"name": "Linux LLVM",
"os": "ubuntu-latest",
"release": false,
"quick": false,
"shell": "nix-shell --arg pkgsDist \"import (fetchTarball \\\"channel:nixos-19.03\\\") {{}}\" --run \"bash -euxo pipefail {0}\"",
"llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-linux-gnu.tar.zst",
"prepare-llvm": "../script/prepare-llvm-linux.sh lean-llvm*",
"binary-check": "ldd -v",
// foreign code may be linked against more recent glibc
// reverse-ffi needs to be updated to link to LLVM libraries
"CTEST_OPTIONS": "-E 'foreign|leanlaketest_reverse-ffi'",
"CMAKE_OPTIONS": "-DLLVM=ON -DLLVM_CONFIG=${GITHUB_WORKSPACE}/build/llvm-host/bin/llvm-config"
},
{
"name": "Linux release",
"os": "ubuntu-latest",
"release": true,
"quick": true,
"shell": "nix-shell --arg pkgsDist \"import (fetchTarball \\\"channel:nixos-19.03\\\") {{}}\" --run \"bash -euxo pipefail {0}\"",
"llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-linux-gnu.tar.zst",
"prepare-llvm": "../script/prepare-llvm-linux.sh lean-llvm*",
"binary-check": "ldd -v",
// foreign code may be linked against more recent glibc
"CTEST_OPTIONS": "-E 'foreign'"
},
{
"name": "Linux",
"os": "ubuntu-latest",
"check-stage3": true,
"test-speedcenter": true,
"quick": false,
},
{
"name": "Linux Debug",
"os": "ubuntu-latest",
"quick": false,
"CMAKE_OPTIONS": "-DCMAKE_BUILD_TYPE=Debug",
// exclude seriously slow tests
"CTEST_OPTIONS": "-E 'interactivetest|leanpkgtest|laketest|benchtest'"
},
{
"name": "Linux fsanitize",
"os": "ubuntu-latest",
"quick": false,
// turn off custom allocator & symbolic functions to make LSAN do its magic
"CMAKE_OPTIONS": "-DLEAN_EXTRA_CXX_FLAGS=-fsanitize=address,undefined -DLEANC_EXTRA_FLAGS='-fsanitize=address,undefined -fsanitize-link-c++-runtime' -DSMALL_ALLOCATOR=OFF -DBSYMBOLIC=OFF",
// exclude seriously slow/problematic tests (laketests crash)
"CTEST_OPTIONS": "-E 'interactivetest|leanpkgtest|laketest|benchtest'"
},
{
"name": "macOS",
"os": "macos-latest",
"release": true,
"quick": false,
"shell": "bash -euxo pipefail {0}",
"llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-apple-darwin.tar.zst",
"prepare-llvm": "../script/prepare-llvm-macos.sh lean-llvm*",
"binary-check": "otool -L",
"tar": "gtar" // https://github.com/actions/runner-images/issues/2619
},
{
"name": "macOS aarch64",
"os": "macos-latest",
"release": true,
"quick": false,
"cross": true,
"shell": "bash -euxo pipefail {0}",
"CMAKE_OPTIONS": "-DUSE_GMP=OFF -DLEAN_INSTALL_SUFFIX=-darwin_aarch64",
"llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-aarch64-apple-darwin.tar.zst https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-apple-darwin.tar.zst",
"prepare-llvm": "EXTRA_FLAGS=--target=aarch64-apple-darwin ../script/prepare-llvm-macos.sh lean-llvm-aarch64-* lean-llvm-x86_64-*",
"binary-check": "otool -L",
"tar": "gtar" // https://github.com/actions/runner-images/issues/2619
},
{
"name": "Windows",
"os": "windows-2022",
"release": true,
"quick": false,
"shell": "msys2 {0}",
"CMAKE_OPTIONS": "-G \"Unix Makefiles\" -DUSE_GMP=OFF",
// for reasons unknown, interactivetests are flaky on Windows
"CTEST_OPTIONS": "--repeat until-pass:2",
"llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-w64-windows-gnu.tar.zst",
"prepare-llvm": "../script/prepare-llvm-mingw.sh lean-llvm*",
"binary-check": "ldd"
},
{
"name": "Linux aarch64",
"os": "ubuntu-latest",
"CMAKE_OPTIONS": "-DUSE_GMP=OFF -DLEAN_INSTALL_SUFFIX=-linux_aarch64",
"release": true,
"quick": false,
"cross": true,
"shell": "nix-shell --arg pkgsDist \"import (fetchTarball \\\"channel:nixos-19.03\\\") {{ localSystem.config = \\\"aarch64-unknown-linux-gnu\\\"; }}\" --run \"bash -euxo pipefail {0}\"",
"llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-linux-gnu.tar.zst https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-aarch64-linux-gnu.tar.zst",
"prepare-llvm": "EXTRA_FLAGS=--target=aarch64-unknown-linux-gnu ../script/prepare-llvm-linux.sh lean-llvm-aarch64-* lean-llvm-x86_64-*"
},
{
"name": "Linux 32bit",
"os": "ubuntu-latest",
// Use 32bit on stage0 and stage1 to keep oleans compatible
"CMAKE_OPTIONS": "-DSTAGE0_USE_GMP=OFF -DSTAGE0_LEAN_EXTRA_CXX_FLAGS='-m32' -DSTAGE0_LEANC_OPTS='-m32' -DSTAGE0_MMAP=OFF -DUSE_GMP=OFF -DLEAN_EXTRA_CXX_FLAGS='-m32' -DLEANC_OPTS='-m32' -DMMAP=OFF -DLEAN_INSTALL_SUFFIX=-linux_x86",
"cmultilib": true,
"release": true,
"quick": false,
"cross": true,
"shell": "bash -euxo pipefail {0}"
},
{
"name": "Web Assembly",
"os": "ubuntu-latest",
// Build a native 32bit binary in stage0 and use it to compile the oleans and the wasm build
"CMAKE_OPTIONS": "-DCMAKE_C_COMPILER_WORKS=1 -DSTAGE0_USE_GMP=OFF -DSTAGE0_LEAN_EXTRA_CXX_FLAGS='-m32' -DSTAGE0_LEANC_OPTS='-m32' -DSTAGE0_CMAKE_CXX_COMPILER=clang++ -DSTAGE0_CMAKE_C_COMPILER=clang -DSTAGE0_CMAKE_EXECUTABLE_SUFFIX=\"\" -DUSE_GMP=OFF -DMMAP=OFF -DSTAGE0_MMAP=OFF -DCMAKE_AR=../emsdk/emsdk-main/upstream/emscripten/emar -DCMAKE_TOOLCHAIN_FILE=../emsdk/emsdk-main/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake -DLEAN_INSTALL_SUFFIX=-linux_wasm32",
"wasm": true,
"cmultilib": true,
"release": true,
"quick": false,
"cross": true,
"shell": "bash -euxo pipefail {0}",
// Just a few selected tests because wasm is slow
"CTEST_OPTIONS": "-R \"leantest_1007\\.lean|leantest_Format\\.lean|leanruntest\\_1037.lean|leanruntest_ac_rfl\\.lean\""
}
];
console.log(`matrix:\n${JSON.stringify(matrix, null, 2)}`)
if (quick) {
return matrix.filter((job) => job.quick)
} else {
return matrix
}
- name: Checkout
uses: actions/checkout@v3
# don't schedule nightlies on forks
if: github.event_name == 'schedule' && github.repository == 'leanprover/lean4'
- name: Set Nightly
if: github.event_name == 'schedule' && github.repository == 'leanprover/lean4'
id: set-nightly
id: set
run: |
if [[ -n '${{ secrets.PUSH_NIGHTLY_TOKEN }}' ]]; then
git remote add nightly https://foo:'${{ secrets.PUSH_NIGHTLY_TOKEN }}'@github.com/${{ github.repository_owner }}/lean4-nightly.git
@@ -205,9 +39,26 @@ jobs:
fi
fi
# This job determines if this CI build is for a tagged release.
# It only runs when a tag is pushed to the `leanprover` repository.
# It sets `set-release.outputs.RELEASE_TAG` to the tag, if the tag is "v" followed by a valid semver,
# and sets `set-release.outputs.{LEAN_VERSION_MAJOR,LEAN_VERSION_MINOR,LEAN_VERSION_PATCH,LEAN_SPECIAL_VERSION_DESC}`
# to the semver components parsed via regex.
set-release:
runs-on: ubuntu-latest
outputs:
LEAN_VERSION_MAJOR: ${{ steps.set.outputs.LEAN_VERSION_MAJOR }}
LEAN_VERSION_MINOR: ${{ steps.set.outputs.LEAN_VERSION_MINOR }}
LEAN_VERSION_PATCH: ${{ steps.set.outputs.LEAN_VERSION_PATCH }}
LEAN_SPECIAL_VERSION_DESC: ${{ steps.set.outputs.LEAN_SPECIAL_VERSION_DESC }}
RELEASE_TAG: ${{ steps.set.outputs.RELEASE_TAG }}
steps:
- name: Checkout
uses: actions/checkout@v3
if: startsWith(github.ref, 'refs/tags/') && github.repository == 'leanprover/lean4'
- name: Check for official release
if: startsWith(github.ref, 'refs/tags/') && github.repository == 'leanprover/lean4'
id: set-release
id: set
run: |
TAG_NAME=${GITHUB_REF##*/}
@@ -236,17 +87,98 @@ jobs:
fi
build:
needs: [configure]
needs: [set-nightly, set-release]
if: github.event_name != 'schedule' || github.repository == 'leanprover/lean4'
strategy:
matrix:
include: ${{fromJson(needs.configure.outputs.matrix)}}
# complete all jobs
fail-fast: false
runs-on: ${{ matrix.os }}
defaults:
run:
shell: ${{ matrix.shell || 'nix-shell --run "bash -euxo pipefail {0}"' }}
strategy:
matrix:
include:
# portable release build: use channel with older glibc (2.27)
- name: Linux LLVM
os: ubuntu-latest
release: false
shell: nix-shell --arg pkgsDist "import (fetchTarball \"channel:nixos-19.03\") {{}}" --run "bash -euxo pipefail {0}"
llvm-url: https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-linux-gnu.tar.zst
prepare-llvm: ../script/prepare-llvm-linux.sh lean-llvm*
binary-check: ldd -v
# foreign code may be linked against more recent glibc
# reverse-ffi needs to be updated to link to LLVM libraries
CTEST_OPTIONS: -E 'foreign|leanlaketest_reverse-ffi'
CMAKE_OPTIONS: -DLLVM=ON -DLLVM_CONFIG=${GITHUB_WORKSPACE}/build/llvm-host/bin/llvm-config
- name: Linux release
os: ubuntu-latest
release: true
shell: nix-shell --arg pkgsDist "import (fetchTarball \"channel:nixos-19.03\") {{}}" --run "bash -euxo pipefail {0}"
llvm-url: https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-linux-gnu.tar.zst
prepare-llvm: ../script/prepare-llvm-linux.sh lean-llvm*
binary-check: ldd -v
# foreign code may be linked against more recent glibc
CTEST_OPTIONS: -E 'foreign'
- name: Linux
os: ubuntu-latest
check-stage3: true
test-speedcenter: true
- name: Linux Debug
os: ubuntu-latest
CMAKE_OPTIONS: -DCMAKE_BUILD_TYPE=Debug
# exclude seriously slow tests
CTEST_OPTIONS: -E 'interactivetest|leanpkgtest|laketest|benchtest'
- name: Linux fsanitize
os: ubuntu-latest
# turn off custom allocator & symbolic functions to make LSAN do its magic
CMAKE_OPTIONS: -DLEAN_EXTRA_CXX_FLAGS=-fsanitize=address,undefined -DLEANC_EXTRA_FLAGS='-fsanitize=address,undefined -fsanitize-link-c++-runtime' -DSMALL_ALLOCATOR=OFF -DBSYMBOLIC=OFF
# exclude seriously slow/problematic tests (laketests crash)
CTEST_OPTIONS: -E 'interactivetest|leanpkgtest|laketest|benchtest'
- name: macOS
os: macos-latest
release: true
shell: bash -euxo pipefail {0}
llvm-url: https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-apple-darwin.tar.zst
prepare-llvm: ../script/prepare-llvm-macos.sh lean-llvm*
binary-check: otool -L
tar: gtar # https://github.com/actions/runner-images/issues/2619
- name: macOS aarch64
os: macos-latest
release: true
cross: true
shell: bash -euxo pipefail {0}
CMAKE_OPTIONS: -DUSE_GMP=OFF -DLEAN_INSTALL_SUFFIX=-darwin_aarch64
llvm-url: https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-aarch64-apple-darwin.tar.zst https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-apple-darwin.tar.zst
prepare-llvm: EXTRA_FLAGS=--target=aarch64-apple-darwin ../script/prepare-llvm-macos.sh lean-llvm-aarch64-* lean-llvm-x86_64-*
binary-check: otool -L
tar: gtar # https://github.com/actions/runner-images/issues/2619
- name: Windows
os: windows-2022
release: true
shell: msys2 {0}
CMAKE_OPTIONS: -G "Unix Makefiles" -DUSE_GMP=OFF
# for reasons unknown, interactivetests are flaky on Windows
CTEST_OPTIONS: --repeat until-pass:2
llvm-url: https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-w64-windows-gnu.tar.zst
prepare-llvm: ../script/prepare-llvm-mingw.sh lean-llvm*
binary-check: ldd
- name: Linux aarch64
os: ubuntu-latest
CMAKE_OPTIONS: -DUSE_GMP=OFF -DLEAN_INSTALL_SUFFIX=-linux_aarch64
release: true
cross: true
shell: nix-shell --arg pkgsDist "import (fetchTarball \"channel:nixos-19.03\") {{ localSystem.config = \"aarch64-unknown-linux-gnu\"; }}" --run "bash -euxo pipefail {0}"
llvm-url: https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-linux-gnu.tar.zst https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-aarch64-linux-gnu.tar.zst
prepare-llvm: EXTRA_FLAGS=--target=aarch64-unknown-linux-gnu ../script/prepare-llvm-linux.sh lean-llvm-aarch64-* lean-llvm-x86_64-*
- name: Web Assembly
os: ubuntu-latest
# Build a native 32bit binary in stage0 and use it to compile the oleans and the wasm build
CMAKE_OPTIONS: -DCMAKE_C_COMPILER_WORKS=1 -DSTAGE0_USE_GMP=OFF -DSTAGE0_LEAN_EXTRA_CXX_FLAGS='-m32' -DSTAGE0_LEANC_OPTS='-m32' -DSTAGE0_CMAKE_CXX_COMPILER=clang++ -DSTAGE0_CMAKE_C_COMPILER=clang -DSTAGE0_CMAKE_EXECUTABLE_SUFFIX="" -DUSE_GMP=OFF -DMMAP=OFF -DSTAGE0_MMAP=OFF -DUSE_GMP=OFF -DCMAKE_AR=../emsdk/emsdk-main/upstream/emscripten/emar -DCMAKE_TOOLCHAIN_FILE=../emsdk/emsdk-main/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake
wasm: true
cross: true
shell: bash -euxo pipefail {0}
# Just a few selected test because wasm is slow
CTEST_OPTIONS: -R "leantest_1007\.lean|leantest_Format\.lean|leanruntest\_1037.lean|leanruntest_ac_rfl\.lean"
# complete all jobs
fail-fast: false
name: ${{ matrix.name }}
env:
# must be inside workspace
@@ -265,13 +197,11 @@ jobs:
uses: actions/checkout@v3
with:
submodules: true
# the default is to use a virtual merge commit between the PR and master: just use the PR
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Nix
uses: cachix/install-nix-action@v18
with:
install_url: https://releases.nixos.org/nix/nix-2.12.0/install
if: matrix.os == 'ubuntu-latest' && !matrix.cmultilib
if: matrix.os == 'ubuntu-latest' && !matrix.wasm
- name: Install MSYS2
uses: msys2/setup-msys2@v2
with:
@@ -284,7 +214,7 @@ jobs:
brew install ccache tree zstd coreutils gmp
if: matrix.os == 'macos-latest'
- name: Setup emsdk
uses: mymindstorm/setup-emsdk@v12
uses: mymindstorm/setup-emsdk@v11
with:
version: 3.1.44
actions-cache-folder: emsdk
@@ -293,7 +223,7 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y gcc-multilib g++-multilib ccache
if: matrix.cmultilib
if: matrix.wasm
- name: Cache
uses: actions/cache@v3
with:
@@ -318,22 +248,21 @@ jobs:
mkdir build
cd build
ulimit -c unlimited # coredumps
# this also enables githash embedding into stage 1 library
OPTIONS=(-DCHECK_OLEAN_VERSION=ON)
OPTIONS=()
if [[ -n '${{ matrix.prepare-llvm }}' ]]; then
wget -q ${{ matrix.llvm-url }}
PREPARE="$(${{ matrix.prepare-llvm }})"
eval "OPTIONS+=($PREPARE)"
fi
if [[ -n '${{ matrix.release }}' && -n '${{ needs.configure.outputs.nightly }}' ]]; then
OPTIONS+=(-DLEAN_SPECIAL_VERSION_DESC=${{ needs.configure.outputs.nightly }})
if [[ -n '${{ matrix.release }}' && -n '${{ needs.set-nightly.outputs.nightly }}' ]]; then
OPTIONS+=(-DLEAN_SPECIAL_VERSION_DESC=${{ needs.set-nightly.outputs.nightly }})
fi
if [[ -n '${{ matrix.release }}' && -n '${{ needs.configure.outputs.RELEASE_TAG }}' ]]; then
OPTIONS+=(-DLEAN_VERSION_MAJOR=${{ needs.configure.outputs.LEAN_VERSION_MAJOR }})
OPTIONS+=(-DLEAN_VERSION_MINOR=${{ needs.configure.outputs.LEAN_VERSION_MINOR }})
OPTIONS+=(-DLEAN_VERSION_PATCH=${{ needs.configure.outputs.LEAN_VERSION_PATCH }})
if [[ -n '${{ matrix.release }}' && -n '${{ needs.set-release.outputs.RELEASE_TAG }}' ]]; then
OPTIONS+=(-DLEAN_VERSION_MAJOR=${{ needs.set-release.outputs.LEAN_VERSION_MAJOR }})
OPTIONS+=(-DLEAN_VERSION_MINOR=${{ needs.set-release.outputs.LEAN_VERSION_MINOR }})
OPTIONS+=(-DLEAN_VERSION_PATCH=${{ needs.set-release.outputs.LEAN_VERSION_PATCH }})
OPTIONS+=(-DLEAN_VERSION_IS_RELEASE=1)
OPTIONS+=(-DLEAN_SPECIAL_VERSION_DESC=${{ needs.configure.outputs.LEAN_SPECIAL_VERSION_DESC }})
OPTIONS+=(-DLEAN_SPECIAL_VERSION_DESC=${{ needs.set-release.outputs.LEAN_SPECIAL_VERSION_DESC }})
fi
# contortion to support empty OPTIONS with old macOS bash
cmake .. ${{ matrix.CMAKE_OPTIONS }} ${OPTIONS[@]+"${OPTIONS[@]}"} -DLEAN_INSTALL_PREFIX=$PWD/..
@@ -344,13 +273,13 @@ jobs:
- name: List Install Tree
run: |
# omit contents of Init/, ...
tree --du -h lean-*-* | grep -E ' (Init|Lean|Lake|LICENSE|[a-z])'
tree --du -h lean-* | grep -E ' (Init|Lean|Lake|LICENSE|[a-z])'
- name: Pack
run: |
dir=$(echo lean-*-*)
dir=$(echo lean-*)
mkdir pack
# high-compression tar.zst + zip for release, fast tar.zst otherwise
if [[ '${{ startsWith(github.ref, 'refs/tags/') && matrix.release }}' == true || -n '${{ needs.configure.outputs.nightly }}' || -n '${{ needs.configure.outputs.RELEASE_TAG }}' ]]; then
if [[ '${{ startsWith(github.ref, 'refs/tags/') && matrix.release }}' == true || -n '${{ needs.set-nightly.outputs.nightly }}' || -n '${{ needs.set-release.outputs.RELEASE_TAG }}' ]]; then
${{ matrix.tar || 'tar' }} cf - $dir | zstd -T0 --no-progress -19 -o pack/$dir.tar.zst
zip -rq pack/$dir.zip $dir
else
@@ -380,13 +309,13 @@ jobs:
cd build
ulimit -c unlimited # coredumps
make -j4 stage2
if: matrix.test-speedcenter
if: matrix.build-stage2 || matrix.check-stage3
- name: Check Stage 3
run: |
cd build
ulimit -c unlimited # coredumps
make -j4 check-stage3
if: matrix.test-speedcenter
if: matrix.check-stage3
- name: Test Speedcenter Benchmarks
run: |
echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid
@@ -399,7 +328,7 @@ jobs:
cd build
ulimit -c unlimited # coredumps
make update-stage0 && make -j4
if: matrix.name == 'Linux' && !needs.configure.outputs.quick
if: matrix.name == 'Linux'
- name: CCache stats
run: ccache -s
- name: Show stacktrace for coredumps
@@ -423,21 +352,6 @@ jobs:
./build/stage2/bin/lean
./build/stage2/lib/lean/libleanshared.so
# This job collects results from all the matrix jobs
# This can be made the “required” job, instead of listing each
# matrix job separately
all-done:
name: Build matrix complete
runs-on: ubuntu-latest
needs: build
if: ${{ always() }}
steps:
- if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
uses: actions/github-script@v3
with:
script: |
core.setFailed('Some jobs failed')
# This job creates releases from tags
# (whether they are "unofficial" releases for experiments, or official releases when the tag is "v" followed by a semver string.)
# We do not attempt to automatically construct a changelog here:
@@ -461,8 +375,8 @@ jobs:
# This job creates nightly releases during the cron job.
# It is responsible for creating the tag, and automatically generating a changelog.
release-nightly:
needs: [configure, build]
if: needs.configure.outputs.nightly
needs: [set-nightly, build]
if: needs.set-nightly.outputs.nightly
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -478,9 +392,8 @@ jobs:
run: |
git remote add nightly https://foo:'${{ secrets.PUSH_NIGHTLY_TOKEN }}'@github.com/${{ github.repository_owner }}/lean4-nightly.git
git fetch nightly --tags
git tag ${{ needs.configure.outputs.nightly }}
git push nightly ${{ needs.configure.outputs.nightly }}
git push -f origin refs/tags/${{ needs.configure.outputs.nightly }}:refs/heads/nightly
git tag ${{ needs.set-nightly.outputs.nightly }}
git push nightly ${{ needs.set-nightly.outputs.nightly }}
last_tag=$(git log HEAD^ --simplify-by-decoration --pretty="format:%d" | grep -o "nightly-[-0-9]*" | head -n 1)
echo -e "*Changes since ${last_tag}:*\n\n" > diff.md
git show $last_tag:RELEASES.md > old.md
@@ -495,7 +408,7 @@ jobs:
prerelease: true
files: artifacts/*/*
fail_on_unmatched_files: true
tag_name: ${{ needs.configure.outputs.nightly }}
tag_name: ${{ needs.set-nightly.outputs.nightly }}
repository: ${{ github.repository_owner }}/lean4-nightly
env:
GITHUB_TOKEN: ${{ secrets.PUSH_NIGHTLY_TOKEN }}

View File

@@ -6,7 +6,8 @@ on:
tags:
- '*'
pull_request:
merge_group:
branches:
- master
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -33,9 +34,6 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
with:
# the default is to use a virtual merge commit between the PR and master: just use the PR
ref: ${{ github.event.pull_request.head.sha }}
- name: Install Nix
uses: cachix/install-nix-action@v18
with:

View File

@@ -32,9 +32,7 @@ jobs:
token: ${{ secrets.PR_RELEASES_TOKEN }}
# Since `workflow_run` runs on master, we need to specify which commit to check out,
# so that we tag the PR.
# It's important that we use `sourceHeadSha` here, not `targetCommitSha`
# as we *don't* want the synthetic merge with master.
ref: ${{ steps.workflow-info.outputs.sourceHeadSha }}
ref: ${{ steps.workflow-info.outputs.targetCommitSha }}
# We need a full checkout, so that we can push the PR commits to the `lean4-pr-releases` repo.
fetch-depth: 0
@@ -79,130 +77,30 @@ jobs:
number: ${{ steps.workflow-info.outputs.pullRequestNumber }}
labels: toolchain-available
# Next, determine the most recent nightly release in this PR's history.
- name: Find most recent nightly
id: most-recent-nightly-tag
if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' }}
run: |
echo "MOST_RECENT_NIGHTLY=$(script/most-recent-nightly-tag.sh)" >> $GITHUB_ENV
- name: 'Setup jq'
if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' }}
uses: dcarbone/install-jq-action@v1.0.1
# Check that the most recently nightly coincides with 'git merge-base HEAD master'
- name: Check merge-base and nightly-testing-YYYY-MM-DD
if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' }}
id: ready
run: |
echo "Most recent nightly: $MOST_RECENT_NIGHTLY"
NIGHTLY_SHA=$(git rev-parse nightly-$MOST_RECENT_NIGHTLY^{commit})
echo "SHA of most recent nightly: $NIGHTLY_SHA"
MERGE_BASE_SHA=$(git merge-base origin/master HEAD)
echo "SHA of merge-base: $MERGE_BASE_SHA"
if [ "$NIGHTLY_SHA" = "$MERGE_BASE_SHA" ]; then
echo "Most recent nightly tag agrees with the merge base."
REMOTE_BRANCHES=$(git ls-remote -h https://github.com/leanprover-community/mathlib4.git nightly-testing-$MOST_RECENT_NIGHTLY)
if [[ -n "$REMOTE_BRANCHES" ]]; then
echo "... and Mathlib has a 'nightly-testing-$MOST_RECENT_NIGHTLY' branch."
MESSAGE=""
else
echo "... but Mathlib does not yet have a 'nightly-testing-$MOST_RECENT_NIGHTLY' branch."
MESSAGE="- ❗ Mathlib CI can not be attempted yet, as the 'nightly-testing-$MOST_RECENT_NIGHTLY' branch does not exist there yet. We will retry when you push more commits. It may be necessary to rebase onto 'nightly' tomorrow."
fi
else
echo "The most recently nightly tag on this branch has SHA: $NIGHTLY_SHA"
echo "but 'git merge-base origin/master HEAD' reported: $MERGE_BASE_SHA"
git log -10
MESSAGE="- ❗ Mathlib CI will not be attempted unless you rebase your PR onto the 'nightly' branch."
fi
if [[ -n "$MESSAGE" ]]; then
echo "Checking existing messages"
# Use GitHub API to check if a comment already exists
existing_comment=$(curl -L -s -H "Authorization: token ${{ secrets.MATHLIB4_BOT }}" \
-H "Accept: application/vnd.github.v3+json" \
"https://api.github.com/repos/leanprover/lean4/issues/${{ steps.workflow-info.outputs.pullRequestNumber }}/comments" \
| jq '.[] | select(.body | startswith("- ❗ Mathlib") or startswith("- ✅ Mathlib") or startswith("- ❌ Mathlib") or startswith("- 💥 Mathlib") or startswith("- 🟡 Mathlib"))')
existing_comment_id=$(echo "$existing_comment" | jq -r .id)
existing_comment_body=$(echo "$existing_comment" | jq -r .body)
if [[ "$existing_comment_body" != *"$MESSAGE"* ]]; then
MESSAGE="$MESSAGE ($(date "+%Y-%m-%d %H:%M:%S"))"
echo "Posting message to the comments: $MESSAGE"
# Append new result to the existing comment or post a new comment
# It's essential we use the MATHLIB4_BOT token here, so that Mathlib CI can subsequently edit the comment.
if [ -z "$existing_comment_id" ]; then
# Post new comment with a bullet point
echo "Posting as new comment at leanprover/lean4/issues/${{ steps.workflow-info.outputs.pullRequestNumber }}/comments"
curl -L -s \
-X POST \
-H "Authorization: token ${{ secrets.MATHLIB4_BOT }}" \
-H "Accept: application/vnd.github.v3+json" \
-d "$(jq --null-input --arg val "$MESSAGE" '{"body": $val}')" \
"https://api.github.com/repos/leanprover/lean4/issues/${{ steps.workflow-info.outputs.pullRequestNumber }}/comments"
else
# Append new result to the existing comment
echo "Appending to existing comment at leanprover/lean4/issues/${{ steps.workflow-info.outputs.pullRequestNumber }}/comments"
curl -L -s \
-X PATCH \
-H "Authorization: token ${{ secrets.MATHLIB4_BOT }}" \
-H "Accept: application/vnd.github.v3+json" \
-d "$(jq --null-input --arg existing "$existing_comment_body" --arg message "$MESSAGE" '{"body":($existing + "\n" + $message)}')" \
"https://api.github.com/repos/leanprover/lean4/issues/comments/$existing_comment_id"
fi
else
echo "The message already exists in the comment body."
fi
echo "::set-output name=mathlib_ready::false"
else
echo "::set-output name=mathlib_ready::true"
fi
# We next automatically create a Mathlib branch using this toolchain.
# Mathlib CI will be responsible for reporting back success or failure
# to the PR comments asynchronously.
- name: Cleanup workspace
if: steps.workflow-info.outputs.pullRequestNumber != '' && steps.ready.outputs.mathlib_ready == 'true'
if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' }}
run: |
sudo rm -rf *
# Checkout the mathlib4 repository with all branches
- name: Checkout mathlib4 repository
if: steps.workflow-info.outputs.pullRequestNumber != '' && steps.ready.outputs.mathlib_ready == 'true'
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
repository: leanprover-community/mathlib4
token: ${{ secrets.MATHLIB4_BOT }}
ref: nightly-testing
fetch-depth: 0 # This ensures we check out all tags and branches.
ref: nightly-testing # This is more likely than `master` to work with the base of this PR.
fetch-depth: 0
- name: Check if branch exists
if: steps.workflow-info.outputs.pullRequestNumber != '' && steps.ready.outputs.mathlib_ready == 'true'
if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' }}
id: check_branch
run: |
git config user.name "leanprover-community-mathlib4-bot"
git config user.email "leanprover-community-mathlib4-bot@users.noreply.github.com"
if git branch -r | grep -q "nightly-testing-${MOST_RECENT_NIGHTLY}"; then
BASE=nightly-testing-${MOST_RECENT_NIGHTLY}
else
echo "This shouldn't be possible: couldn't find a 'nightly-testing-${MOST_RECENT_NIGHTLY}' branch at Mathlib. Falling back to 'nightly-testing'."
BASE=nightly-testing
fi
echo "Using base branch: $BASE"
git checkout $BASE
EXISTS=$(git ls-remote --heads origin lean-pr-testing-${{ steps.workflow-info.outputs.pullRequestNumber }} | wc -l)
echo "Branch exists: $EXISTS"
if [ "$EXISTS" = "0" ]; then
@@ -214,12 +112,15 @@ jobs:
else
echo "Branch already exists, pushing an empty commit."
git checkout lean-pr-testing-${{ steps.workflow-info.outputs.pullRequestNumber }}
# The Mathlib `nightly-testing` or `nightly-testing-YYYY-MM-DD` branch may have moved since this branch was created, so merge their changes.
git merge $BASE --strategy-option ours --no-commit --allow-unrelated-histories
# The Mathlib `nightly-testing` branch may have moved since this branch was created, so merge their changes.
# If the base of this Lean4 PR becomes significantly older than the nightly being used by `nightly-testing`
# this will cause breakages rather than fixing them!
# Without cumbersome requirements that Lean4 PRs are based off nightlies, I'm not sure there is a perfect solution here.
git merge nightly-testing --strategy-option ours --no-commit --allow-unrelated-histories
git commit --allow-empty -m "Trigger CI for https://github.com/leanprover/lean4/pull/${{ steps.workflow-info.outputs.pullRequestNumber }}"
fi
- name: Push changes
if: steps.workflow-info.outputs.pullRequestNumber != '' && steps.ready.outputs.mathlib_ready == 'true'
if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' }}
run: |
git push origin lean-pr-testing-${{ steps.workflow-info.outputs.pullRequestNumber }}

View File

@@ -1,20 +0,0 @@
name: Check PR title for commit convention
on:
merge_group:
pull_request:
types: [opened, synchronize, reopened, edited]
jobs:
check-pr-title:
runs-on: ubuntu-latest
steps:
- name: Check PR title
uses: actions/github-script@v6
with:
script: |
const msg = context.payload.pull_request? context.payload.pull_request.title : context.payload.merge_group.head_commit.message;
console.log(`Message: ${msg}`)
if (!/^(feat|fix|doc|style|refactor|test|chore|perf): .*[^.]($|\n\n)/.test(msg)) {
core.setFailed('PR title does not follow the Commit Convention (https://leanprover.github.io/lean4/doc/dev/commit_convention.html).');
}

31
.github/workflows/pr.yml vendored Normal file
View File

@@ -0,0 +1,31 @@
name: sanity-check opened PRs
on:
# needs read/write GH token, do *not* execute arbitrary code from PR
pull_request_target:
types: [opened]
jobs:
check-pr:
runs-on: ubuntu-latest
steps:
- name: Check Commit Message
uses: actions/github-script@v6
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const { data: commits } = await github.rest.pulls.listCommits({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number,
});
console.log(commits[0].commit.message);
// check first commit only (and only once) since later commits might be intended to be squashed away
if (!/^(feat|fix|doc|style|refactor|test|chore|perf): .*[^.]($|\n\n)/.test(commits[0].commit.message)) {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: 'Thanks for your contribution! Please make sure to follow our [Commit Convention](https://leanprover.github.io/lean4/doc/dev/commit_convention.html).',
});
}

2
.gitignore vendored
View File

@@ -2,8 +2,6 @@
\#*
.#*
*.lock
.lake
lake-manifest.json
build
!/src/lake/Lake/Build
GPATH

7
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,7 @@
{
"files.insertFinalNewline": true,
"files.trimTrailingWhitespace": true,
"[markdown]": {
"rewrap.wrappingColumn": 70
}
}

View File

@@ -11,7 +11,7 @@ foreach(var ${vars})
list(APPEND STAGE0_ARGS "-D${CMAKE_MATCH_1}=${${var}}")
elseif("${currentHelpString}" MATCHES "No help, variable specified on the command line." OR "${currentHelpString}" STREQUAL "")
list(APPEND CL_ARGS "-D${var}=${${var}}")
if("${var}" MATCHES "USE_GMP|CHECK_OLEAN_VERSION")
if("${var}" STREQUAL "USE_GMP")
# must forward options that generate incompatible .olean format
list(APPEND STAGE0_ARGS "-D${var}=${${var}}")
endif()
@@ -35,8 +35,6 @@ ExternalProject_add(stage0
SOURCE_SUBDIR src
BINARY_DIR stage0
# do not rebuild stage0 when git hash changes; it's not from this commit anyway
# (however, `CHECK_OLEAN_VERSION=ON` in CI will override this as we need to
# embed the githash into the stage 1 library built by stage 0)
CMAKE_ARGS -DSTAGE=0 -DUSE_GITHASH=OFF ${PLATFORM_ARGS} ${STAGE0_ARGS}
BUILD_ALWAYS ON # cmake doesn't auto-detect changes without a download method
INSTALL_COMMAND "" # skip install

View File

@@ -1,21 +0,0 @@
# Code Owners
#
# Documents responsible people per component.
# Listed persons will automatically be asked by GitHub to review a PR touching these paths.
# If multiple names are listed, a review by any of them is considered sufficient by default.
/.github/ @Kha @semorrison
/RELEASES.md @semorrison
/src/ @leodemoura @Kha
/src/Init/IO.lean @joehendrix
/src/kernel/ @leodemoura
/src/lake/ @tydeu
/src/Lean/Compiler/ @leodemoura
/src/Lean/Data/Lsp/ @mhuisi
/src/Lean/Elab/Deriving/ @semorrison
/src/Lean/Elab/Tactic/ @semorrison
/src/Lean/Meta/Tactic/ @leodemoura
/src/Lean/Parser/ @Kha
/src/Lean/PrettyPrinter/ @Kha
/src/Lean/Server/ @mhuisi
/src/runtime/io.cpp @joehendrix

View File

@@ -1,79 +1,61 @@
External Contribution Guidelines
============
# Contribution Guidelines
In the past, we accepted most pull requests. This practice produced hard to maintain code, performance problems, and bugs. In order to improve the quality and maintainability of our codebase, we've established the following guidelines for external contributions.
Thank you for your interest in contributing to Lean! There are many ways to contribute and we appreciate all of them.
Helpful links
-------
## Bug reports
* [Development Setup](./doc/dev/index.md)
* [Testing](./doc/dev/testing.md)
* [Commit convention](./doc/dev/commit_convention.md)
Bug reports as new issues are always welcome. Please check the existing [issues](https://github.com/leanprover/lean4/issues) first.
Reduce the issue to a self-contained, reproducible test case.
If you have the chance, before reporting a bug, please search existing issues, as it's possible that
someone else has already reported your error.
If you're not sure if something is a bug or not, feel free to file a bug anyway. You may also want to discuss it with the Lean
community using the [lean4 Zulip channel](https://leanprover.zulipchat.com/#narrow/stream/270676-lean4).
Before You Submit a Pull Request (PR):
-------
## Simple fixes
**Start with an Issue**: Before submitting a PR, always open an issue discussing the problem you wish to solve or the feature you'd like to add. Use the prefix `RFC:` (request for comments) if you are proposing a new feature. Ask for feedback from other users. Take the time to summarize all the feedback. This allows the maintainers to evaluate your proposal more efficiently. When creating a RFC, consider the following questions:
Simple fixes for **typos and clear bugs** are welcome.
- **User Experience**: How does this feature improve the user experience?
# **IMPORTANT**
- **Beneficiaries**: Which Lean users and projects benefit most from this feature/change?
We are currently overwhelmed. We respectfully request that you hold off on submitting Pull Requests and creating Request for Comments (RFCs) at this time. Our team is actively seeking funding to expand the Lean development team and improve our capacity to review and integrate contributions. We appreciate your understanding and look forward to being able to accept contributions in the near future. In the meantime, the process described in the following sections is temporarily suspended.
- **Community Feedback**: Have you sought feedback or insights from other Lean users?
## Documentation
- **Maintainability**: Will this change streamline code maintenance or simplify its structure?
Tutorial-like examples are very welcome.
They are useful for finding rough edges and bugs in Lean 4, for highlighting new features, and for showing how to use Lean.
If you want to store your tutorial in the Lean 4 repository to make sure future changes will not break it, we suggest the following workflow:
* Contact one of the Lean developers on Zulip, and check whether your tutorial is a good match for the Lean 4 repository.
* Send bug reports and report rough edges. We will work with you until the tutorial looks great.
* Add plenty of comments and make sure others will be able to follow it.
* Create a pull request in the Lean 4 repository. After merging, we will link it to the official documentation and make sure it becomes part of our test suite.
**Understand the Project**: Familiarize yourself with the project, existing issues, and latest commits. Ensure your contribution aligns with the project's direction and priorities.
You can use `.lean` or `.md` files to create your tutorial. The `.md` files are ideal when you want to format your prose using markdown. For an example, see [this `.md` file](https://github.com/leanprover/lean4/blob/master/doc/lean3changes.md).
**Stay Updated**: Regularly fetch and merge changes from the main branch to ensure your branch is up-to-date and can be smoothly integrated.
Contributions to the reference manual are also welcome, but since Lean 4 is changing rapidly, please contact us first using Zulip
to find out which parts are stable enough to document. We will work with you to get this kind of
pull request merged. We are also happy to meet using Zoom, Skype or Google hangout to coordinate this kind of effort.
**Help wanted**: We have issues tagged with ["help wanted"](https://github.com/leanprover/lean4/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22), if you want to contribute to the project, please take a look at them. If you are interested in one of them, post comments, ask questions, and engage with the core developers there.
As Lean 4 matures, other forms of documentation (e.g., doc-strings) will be welcome too.
Quality Over Quantity:
-----
## "Help wanted"
**Focused Changes**: Each PR should address a single, clearly-defined issue or feature. Avoid making multiple unrelated changes in a single PR.
For issues marked as [`help wanted`](https://github.com/leanprover/lean4/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22), pull requests (PR) are welcome and we will work with you to get a PR merged. Some of these issues are nontrivial. If you are interested, please consider adding comments to the issue and/or messaging the Lean developers in [Zulip](https://leanprover.zulipchat.com/#).
**Write Tests**: Every new feature or bug fix should come with relevant tests. This ensures the robustness and reliability of the contribution.
## Unexpected Pull Requests
**Documentation**: Update relevant documentation, including comments in the code, to explain the logic and reasoning behind your changes.
We have very few core developers, and we cannot review arbitrary pull requests (PRs). Moreover, many features involve subtle tradeoffs, and it may require significant time and energy to even assess a proposed design. We suggest the following workflow:
Coding Standards:
----
* First, discuss your idea with the Lean community on Zulip. Ask the community to help collect examples, document the requirements, and detect complications.
* If there is broad support, create a detailed issue for it on the Lean 4 repository at GitHub, and tag the issue with `RFC`.
* Ask the community for help documenting the requirements, and for collecting examples and concerns.
* Wait for one of the core developers to give you a "go ahead". At this point, the core developers will work with you to make sure your PR gets merged.
**Follow the Code Style**: Ensure that your code follows the established coding style of the project.
We don't want to waste your time by you implementing a feature and then us not being able to merge it.
**Lean on Lean**: Use Lean's built-in features and libraries effectively, avoiding reinventions.
## How to Contribute
**Performance**: Make sure that your changes do not introduce performance regressions. If possible, optimize the solution for speed and resource usage.
PR Submission:
---
**Descriptive Title and Summary**: The PR title should briefly explain the purpose of the PR. The summary should give more detailed information on what changes are made and why. Links to Zulip threads are not acceptable as a summary. You are responsible for summarizing the discussion, and getting support for it.
**Follow the commit convention**: Pull requests are squash merged, and the
commit message is taken from the pull request title and body, so make sure they adhere to the [commit convention](https://github.com/leanprover/lean4/blob/master/doc/dev/commit_convention.md). Put questions and extra information, which should not be part of the final commit message, into a first comment rather than the Pull Request description.
Because the change will be squashed, there is no need to polish the commit messages and history on the branch.
**Link to Relevant Issues**: Reference any issues that your PR addresses to provide context.
**Stay Responsive**: Once the PR is submitted, stay responsive to feedback and be prepared to make necessary revisions. We will close any PR that has been inactive (no response or updates from the submitter) for more than a month.
Reviews and Feedback:
----
**Be Patient**: Given the limited number of full-time maintainers and the volume of PRs, reviews may take some time.
**Engage Constructively**: Always approach feedback positively and constructively. Remember, reviews are about ensuring the best quality for the project, not personal criticism.
**Continuous Integration**: Ensure that all CI checks pass on your PR. Failed checks will delay the review process. The maintainers will not check PRs containing failures.
What to Expect:
----
**Not All PRs Get Merged**: While we appreciate every contribution, not all PRs will be merged. Ensure your changes align with the project's goals and quality standards.
**Feedback is a Gift**: It helps improve the project and can also help you grow as a developer or contributor.
**Community Involvement**: Engage with the Lean community on our communication channels. This can lead to better collaboration and understanding of the project's direction.
* Always follow the [commit convention](https://lean-lang.org/lean4/doc/dev/commit_convention.html).
* Follow the style of the surrounding code. When in doubt, look at other files using the particular syntax as well.
* Make sure your code is documented.
* New features or bug fixes should come with appropriate tests.
* Ensure all tests work before submitting a PR; see [Development Setup](https://lean-lang.org/lean4/doc/make/index.html#development-setup) and [Fixing Tests](https://lean-lang.org/lean4/doc/dev/fixing_tests.html).

View File

@@ -14,7 +14,7 @@ and have just begun regular [stable point releases](https://github.com/leanprove
- [Manual](https://lean-lang.org/lean4/doc/)
- [Release notes](RELEASES.md) starting at v4.0.0-m3
- [Examples](https://lean-lang.org/lean4/doc/examples.html)
- [External Contribution Guidelines](CONTRIBUTING.md)
- [External Contribution Guidelines](https://github.com/leanprover/lean4/blob/master/doc/contributions.md)
- [FAQ](https://lean-lang.org/lean4/doc/faq.html)
# Installation

View File

@@ -5,82 +5,13 @@ There is not yet a strong guarantee of backwards compatibility between versions,
only an expectation that breaking changes will be documented in this file.
This file contains work-in-progress notes for the upcoming release, as well as previous stable releases.
Please check the [releases](https://github.com/leanprover/lean4/releases) page for the current status
of each version.
Please check the [releases](https://github.com/leanprover/lean4/releases) page for the current status of each version.
v4.5.0 (development in progress)
---------
v4.4.0
---------
* [Rename request handler](https://github.com/leanprover/lean4/pull/2462).
* [Import auto-completion](https://github.com/leanprover/lean4/pull/2904).
* [`pp.beta` to apply beta reduction when pretty printing](https://github.com/leanprover/lean4/pull/2864).
* [Per-package server options](https://github.com/leanprover/lean4/pull/2858).
* [Embed and check githash in .olean](https://github.com/leanprover/lean4/pull/2766).
* [Guess lexicographic order for well-founded recursion](https://github.com/leanprover/lean4/pull/2874).
* [Allow trailing comma in tuples, lists, and tactics](https://github.com/leanprover/lean4/pull/2643).
Bug fixes for [#2628](https://github.com/leanprover/lean4/issues/2628), [#2883](https://github.com/leanprover/lean4/issues/2883),
[#2810](https://github.com/leanprover/lean4/issues/2810), [#2925](https://github.com/leanprover/lean4/issues/2925), and [#2914](https://github.com/leanprover/lean4/issues/2914).
**Lake:**
* `lake init .` and a bare `lake init` will now use the current directory as the package name. [#2890](https://github.com/leanprover/lean4/pull/2890)
* `lake new` and `lake init` will now produce errors on invalid package names such as `..`, `foo/bar`, `Init`, `Lean`, `Lake`, and `Main`. See issue [#2637](https://github.com/leanprover/lean4/issues/2637) and PR [#2890](https://github.com/leanprover/lean4/pull/2890).
* `lean_lib` no longer converts its name to upper camel case (e.g., `lean_lib bar` will include modules named `bar.*` rather than `Bar.*`). See issue [#2567](https://github.com/leanprover/lean4/issues/2567) and PR [#2889](https://github.com/leanprover/lean4/pull/2889).
* Lean and Lake now properly support non-identifier library names (e.g., `lake new 123-hello` and `import «123Hello»` now work correctly). See issue [#2865](https://github.com/leanprover/lean4/issues/2865) and PR [#2888](https://github.com/leanprover/lean4/pull/2888).
* Lake now filters the environment extensions loaded from a compiled configuration (`lakefile.olean`) to include only those relevant to Lake's workspace loading process. This resolves segmentation faults caused by environment extension type mismatches (e.g., when defining custom elaborators via `elab` in configurations). See issue [#2632](https://github.com/leanprover/lean4/issues/2632) and PR [#2896](https://github.com/leanprover/lean4/pull/2896).
* Cloud releases will now properly be re-unpacked if the build directory is removed. See PR [#2928](https://github.com/leanprover/lean4/pull/2928).
* Lake's `math` template has been simplified. See PR [#2930](https://github.com/leanprover/lean4/pull/2930).
* `lake exe <target>` now parses `target` like a build target (as the help text states it should) rather than as a basic name. For example, `lake exe @mathlib/runLinter` should now work. See PR [#2932](https://github.com/leanprover/lean4/pull/2932).
* `lake new foo.bar [std]` now generates executables named `foo-bar` and `lake new foo.bar exe` properly creates `foo/bar.lean`. See PR [#2932](https://github.com/leanprover/lean4/pull/2932).
* Later packages and libraries in the dependency tree are now preferred over earlier ones. That is, the later ones "shadow" the earlier ones. Such an ordering is more consistent with how declarations generally work in programming languages. This will break any package that relied on the previous ordering. See issue [#2548](https://github.com/leanprover/lean4/issues/2548) and PR [#2937](https://github.com/leanprover/lean4/pull/2937).
* Executable roots are no longer mistakenly treated as importable. They will no longer be picked up by `findModule?`. See PR [#2937](https://github.com/leanprover/lean4/pull/2937).
v4.3.0
---------
* `simp [f]` does not unfold partial applications of `f` anymore. See issue [#2042](https://github.com/leanprover/lean4/issues/2042).
To fix proofs affected by this change, use `unfold f` or `simp (config := { unfoldPartialApp := true }) [f]`.
* By default, `simp` will no longer try to use Decidable instances to rewrite terms. In particular, not all decidable goals will be closed by `simp`, and the `decide` tactic may be useful in such cases. The `decide` simp configuration option can be used to locally restore the old `simp` behavior, as in `simp (config := {decide := true})`; this includes using Decidable instances to verify side goals such as numeric inequalities.
* Many bug fixes:
* [Add left/right actions to term tree coercion elaborator and make `^` a right action](https://github.com/leanprover/lean4/pull/2778)
* [Fix for #2775, don't catch max recursion depth errors](https://github.com/leanprover/lean4/pull/2790)
* [Reduction of `Decidable` instances very slow when using `cases` tactic](https://github.com/leanprover/lean4/issues/2552)
* [`simp` not rewriting in binder](https://github.com/leanprover/lean4/issues/1926)
* [`simp` unfolding `let` even with `zeta := false` option](https://github.com/leanprover/lean4/issues/2669)
* [`simp` (with beta/zeta disabled) and discrimination trees](https://github.com/leanprover/lean4/issues/2281)
* [unknown free variable introduced by `rw ... at h`](https://github.com/leanprover/lean4/issues/2711)
* [`dsimp` doesn't use `rfl` theorems which consist of an unapplied constant](https://github.com/leanprover/lean4/issues/2685)
* [`dsimp` does not close reflexive equality goals if they are wrapped in metadata](https://github.com/leanprover/lean4/issues/2514)
* [`rw [h]` uses `h` from the environment in preference to `h` from the local context](https://github.com/leanprover/lean4/issues/2729)
* [missing `withAssignableSyntheticOpaque` for `assumption` tactic](https://github.com/leanprover/lean4/issues/2361)
* [ignoring default value for field warning](https://github.com/leanprover/lean4/issues/2178)
* [Cancel outstanding tasks on document edit in the language server](https://github.com/leanprover/lean4/pull/2648).
* [Remove unnecessary `%` operations in `Fin.mod` and `Fin.div`](https://github.com/leanprover/lean4/pull/2688)
* [Avoid `DecidableEq` in `Array.mem`](https://github.com/leanprover/lean4/pull/2774)
* [Ensure `USize.size` unifies with `?m + 1`](https://github.com/leanprover/lean4/issues/1926)
* [Improve compatibility with emacs eglot client](https://github.com/leanprover/lean4/pull/2721)
**Lake:**
* [Sensible defaults for `lake new MyProject math`](https://github.com/leanprover/lean4/pull/2770)
* Changed `postUpdate?` configuration option to a `post_update` declaration. See the `post_update` syntax docstring for more information on the new syntax.
* [A manifest is automatically created on workspace load if one does not exist.](https://github.com/leanprover/lean4/pull/2680).
* The `:=` syntax for configuration declarations (i.e., `package`, `lean_lib`, and `lean_exe`) has been deprecated. For example, `package foo := {...}` is deprecated.
* [support for overriding package URLs via `LAKE_PKG_URL_MAP`](https://github.com/leanprover/lean4/pull/2709)
* Moved the default build directory (e.g., `build`), default packages directory (e.g., `lake-packages`), and the compiled configuration (e.g., `lakefile.olean`) into a new dedicated directory for Lake outputs, `.lake`. The cloud release build archives are also stored here, fixing [#2713](https://github.com/leanprover/lean4/issues/2713).
* Update manifest format to version 7 (see [lean4#2801](https://github.com/leanprover/lean4/pull/2801) for details on the changes).
* Deprecate the `manifestFile` field of a package configuration.
* There is now a more rigorous check on `lakefile.olean` compatibility (see [#2842](https://github.com/leanprover/lean4/pull/2842) for more details).
v4.2.0
v4.3.0 (development in progress)
---------
* [isDefEq cache for terms not containing metavariables.](https://github.com/leanprover/lean4/pull/2644).
* [Cancel outstanding tasks on document edit in the language server](https://github.com/leanprover/lean4/pull/2648).
* Make [`Environment.mk`](https://github.com/leanprover/lean4/pull/2604) and [`Environment.add`](https://github.com/leanprover/lean4/pull/2642) private, and add [`replay`](https://github.com/leanprover/lean4/pull/2617) as a safer alternative.
* `IO.Process.output` no longer inherits the standard input of the caller.
* [Do not inhibit caching](https://github.com/leanprover/lean4/pull/2612) of default-level `match` reduction.
@@ -89,6 +20,10 @@ v4.2.0
* [Show path of failed import in Lake](https://github.com/leanprover/lean4/pull/2616).
* [Fix linker warnings on macOS](https://github.com/leanprover/lean4/pull/2598).
* **Lake:** Add `postUpdate?` package configuration option. Used by a package to specify some code which should be run after a successful `lake update` of the package or one of its downstream dependencies. ([lake#185](https://github.com/leanprover/lake/issues/185))
v4.2.0
---------
* Improvements to Lake startup time ([#2572](https://github.com/leanprover/lean4/pull/2572), [#2573](https://github.com/leanprover/lean4/pull/2573))
* `refine e` now replaces the main goal with metavariables which were created during elaboration of `e` and no longer captures pre-existing metavariables that occur in `e` ([#2502](https://github.com/leanprover/lean4/pull/2502)).
* This is accomplished via changes to `withCollectingNewGoalsFrom`, which also affects `elabTermWithHoles`, `refine'`, `calc` (tactic), and `specialize`. Likewise, all of these now only include newly-created metavariables in their output.

68
doc/contributions.md Normal file
View File

@@ -0,0 +1,68 @@
External Contribution Guidelines
============
**In the past, we accepted most pull requests. This practice produced hard to maintain code, performance problems, and bugs.** In order to improve the quality and maintainability of our codebase, we've established the following guidelines for external contributions.
Before You Submit a Pull Request (PR):
-------
**Start with an Issue**: Before submitting a PR, always open an issue discussing the problem you wish to solve or the feature you'd like to add. Use the prefix `RFC:` (request for comments) if you are proposing a new feature. Ask for feedback from other users. Take the time to summarize all the feedback. This allows the maintainers to evaluate your proposal more efficiently. When creating a RFC, consider the following questions:
- **User Experience**: How does this feature improve the user experience?
- **Beneficiaries**: Which Lean users and projects benefit most from this feature/change?
- **Community Feedback**: Have you sought feedback or insights from other Lean users?
- **Maintainability**: Will this change streamline code maintenance or simplify its structure?
**Understand the Project**: Familiarize yourself with the project, existing issues, and latest commits. Ensure your contribution aligns with the project's direction and priorities.
**Stay Updated**: Regularly fetch and merge changes from the main branch to ensure your branch is up-to-date and can be smoothly integrated.
**Help wanted**: We have issues tagged with ["help wanted"](https://github.com/leanprover/lean4/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22), if you want to contribute to the project, please take a look at them. If you are interested in one of them, post comments, ask questions, and engage with the core developers there.
Quality Over Quantity:
-----
**Focused Changes**: Each PR should address a single, clearly-defined issue or feature. Avoid making multiple unrelated changes in a single PR.
**Write Tests**: Every new feature or bug fix should come with relevant tests. This ensures the robustness and reliability of the contribution.
**Documentation**: Update relevant documentation, including comments in the code, to explain the logic and reasoning behind your changes.
Coding Standards:
----
**Follow the Code Style**: Ensure that your code follows the established coding style of the project.
**Lean on Lean**: Use Lean's built-in features and libraries effectively, avoiding reinventions.
**Performance**: Make sure that your changes do not introduce performance regressions. If possible, optimize the solution for speed and resource usage.
PR Submission:
---
**Descriptive Title and Summary**: The PR title should briefly explain the purpose of the PR. The summary should give more detailed information on what changes are made and why. Links to Zulip threads are not acceptable as a summary. You are responsible for summarizing the discussion, and getting support for it.
**Link to Relevant Issues**: Reference any issues that your PR addresses to provide context.
**Stay Responsive**: Once the PR is submitted, stay responsive to feedback and be prepared to make necessary revisions. We will close any PR that has been inactive (no response or updates from the submitter) for more than a month.
Reviews and Feedback:
----
**Be Patient**: Given the limited number of full-time maintainers and the volume of PRs, reviews may take some time.
**Engage Constructively**: Always approach feedback positively and constructively. Remember, reviews are about ensuring the best quality for the project, not personal criticism.
**Continuous Integration**: Ensure that all CI checks pass on your PR. Failed checks will delay the review process. The maintainers will not check PRs containing failures.
What to Expect:
----
**Not All PRs Get Merged**: While we appreciate every contribution, not all PRs will be merged. Ensure your changes align with the project's goals and quality standards.
**Feedback is a Gift**: It helps improve the project and can also help you grow as a developer or contributor.
**Community Involvement**: Engage with the Lean community on our communication channels. This can lead to better collaboration and understanding of the project's direction.

View File

@@ -1,15 +1,10 @@
Git Commit Convention
=====================
We are using the following convention for writing git commit messages. For pull
requests, make sure the pull request title and description follow this
convention, as the squash-merge commit will inherit title and body from the
pull request.
This convention is based on the one from the AngularJS project ([doc][angularjs-doc],
We are using the following convention for writing git-commit messages.
It is based on the one from the AngularJS project ([doc][angularjs-doc],
[commits][angularjs-git]).
[angularjs-git]: https://github.com/angular/angular.js/commits/master
[angularjs-doc]: https://docs.google.com/document/d/1QrDFcIiPjSLDn3EL15IJygNPiHORgU1_OOAqWjiDU5Y/edit#

View File

@@ -1,6 +1,6 @@
# Development Workflow
If you want to make changes to Lean itself, start by [building Lean](../make/index.md) from a clean checkout to make sure that everything is set up correctly.
If you want to make changes to Lean itself, start by [building Lean](../make/index.html) from a clean checkout to make sure that everything is set up correctly.
After that, read on below to find out how to set up your editor for changing the Lean source code, followed by further sections of the development manual where applicable such as on the [test suite](testing.md) and [commit convention](commit_convention.md).
If you are planning to make any changes that may affect the compilation of Lean itself, e.g. changes to the parser, elaborator, or compiler, you should first read about the [bootstrapping pipeline](bootstrap.md).
@@ -30,14 +30,20 @@ powershell -f elan-init.ps1 --default-toolchain none
del elan-init.ps1
```
The `lean-toolchain` files in the Lean 4 repository are set up to use the `lean4-stage0`
toolchain for editing files in `src` and the `lean4` toolchain for editing files in `tests`.
Run the following commands to make `lean4` point at `stage1` and `lean4-stage0` point at `stage0`:
You can use `elan toolchain link` to give a specific stage build
directory a reference name, then use `elan override set` to associate
such a name to the current directory. We usually want to use `stage0`
for editing files in `src` and `stage1` for everything else (e.g.
tests).
```bash
# in the Lean rootdir
elan toolchain link lean4 build/release/stage1
elan toolchain link lean4-stage0 build/release/stage0
# make `lean` etc. point to stage1 in the rootdir and subdirs
elan override set lean4
cd src
# make `lean` etc. point to stage0 anywhere inside `src`
elan override set lean4-stage0
```
You can also use the `+toolchain` shorthand (e.g. `lean +lean4-debug`) to switch
@@ -59,15 +65,6 @@ If you push `my-tag` to a fork in your github account `my_name`,
you can then put `my_name/lean4:my-tag` in your `lean-toolchain` file in a project using `lake`.
(You must use a tag name that does not start with a numeral, or contain `_`).
### VS Code
There is a `lean.code-workspace` file that correctly sets up VS Code with workspace roots for the stage0/stage1 setup described above as well as with other settings.
You should always load it when working on Lean, such as by invoking
```
code lean.code-workspace
```
on the command line.
### `ccache`
Lean's build process uses [`ccache`](https://ccache.dev/) if it is

View File

@@ -17,8 +17,6 @@ Issue reports and fixes are welcome.
* aarch64 Linux with glibc 2.27+
* aarch64 (Apple Silicon) macOS
* x86 (32-bit) Linux
* Emscripten Web Assembly
<!--
### Tier 3

View File

@@ -1 +0,0 @@
lean4

View File

@@ -1,50 +0,0 @@
{
"folders": [
{
"path": "."
},
{
"path": "src"
},
{
"path": "tests"
}
],
"settings": {
"files.insertFinalNewline": true,
"files.trimTrailingWhitespace": true,
"[markdown]": {
"rewrap.wrappingColumn": 70
},
"[lean4]": {
"editor.rulers": [
100
]
}
},
"tasks": {
"version": "2.0.0",
"tasks": [
{
"label": "build",
"type": "shell",
"command": "make -C build/release -j$(nproc 2>/dev/null || sysctl -n hw.logicalcpu 2>/dev/null || echo 4)",
"problemMatcher": [],
"group": {
"kind": "build",
"isDefault": true
}
},
{
"label": "test",
"type": "shell",
"command": "NPROC=$(nproc 2>/dev/null || sysctl -n hw.logicalcpu 2>/dev/null || echo 4); CTEST_OUTPUT_ON_FAILURE=1 make -C build/release test -j$NPROC ARGS=\"-j$NPROC\"",
"problemMatcher": [],
"group": {
"kind": "test",
"isDefault": true
}
}
]
}
}

View File

@@ -10,7 +10,7 @@ function pebkac() {
[[ $# -gt 0 ]] || pebkac
case $1 in
--version)
# minimum version for `lake serve` with fallback
# minimum version for `lake server` with fallback
echo 3.1.0
;;
print-paths)

View File

@@ -1,16 +0,0 @@
#!/bin/bash
# Prefix for tags to search for
tag_prefix="nightly-"
# Fetch all tags from the remote repository
git fetch https://github.com/leanprover/lean4-nightly.git --tags > /dev/null
# Get the most recent commit that has a matching tag
tag_name=$(git tag --merged HEAD --list "${tag_prefix}*" | sort -rV | head -n 1 | sed "s/^$tag_prefix//")
if [ -z "$tag_name" ]; then
exit 1
fi
echo "$tag_name"

View File

@@ -9,7 +9,7 @@ endif()
include(ExternalProject)
project(LEAN CXX C)
set(LEAN_VERSION_MAJOR 4)
set(LEAN_VERSION_MINOR 5)
set(LEAN_VERSION_MINOR 3)
set(LEAN_VERSION_PATCH 0)
set(LEAN_VERSION_IS_RELEASE 0) # This number is 1 in the release revision, and 0 otherwise.
set(LEAN_SPECIAL_VERSION_DESC "" CACHE STRING "Additional version description like 'nightly-2018-03-11'")
@@ -64,10 +64,10 @@ option(BSYMBOLIC "Link with -Bsymbolic to reduce call overhead in shared librari
option(USE_GMP "USE_GMP" ON)
# development-specific options
option(CHECK_OLEAN_VERSION "Only load .olean files compiled with the current version of Lean" OFF)
option(CHECK_OLEAN_VERSION "Only load .olean files compiled with the current version of Lean" ON)
set(LEAN_EXTRA_MAKE_OPTS "" CACHE STRING "extra options to lean --make")
set(LEANC_CC ${CMAKE_C_COMPILER} CACHE STRING "C compiler to use in `leanc`")
set(LEANC_CC "cc" CACHE STRING "C compiler to use in `leanc`")
if ("${LAZY_RC}" MATCHES "ON")
set(LEAN_LAZY_RC "#define LEAN_LAZY_RC")
@@ -93,9 +93,8 @@ if ("${RUNTIME_STATS}" MATCHES "ON")
string(APPEND LEAN_EXTRA_CXX_FLAGS " -D LEAN_RUNTIME_STATS")
endif()
if ("${CHECK_OLEAN_VERSION}" MATCHES "ON")
set(USE_GITHASH ON)
string(APPEND LEAN_EXTRA_CXX_FLAGS " -D LEAN_CHECK_OLEAN_VERSION")
if (NOT("${CHECK_OLEAN_VERSION}" MATCHES "ON"))
string(APPEND LEAN_EXTRA_CXX_FLAGS " -D LEAN_IGNORE_OLEAN_VERSION")
endif()
if(${CMAKE_SYSTEM_NAME} MATCHES "Emscripten")
@@ -402,17 +401,26 @@ if(MULTI_THREAD AND NOT MSVC AND (NOT ("${CMAKE_SYSTEM_NAME}" MATCHES "Darwin"))
endif()
# Git HASH
set(LEAN_PACKAGE_VERSION "NOT-FOUND")
if(USE_GITHASH)
include(GetGitRevisionDescription)
get_git_head_revision(GIT_REFSPEC GIT_SHA1)
if(${GIT_SHA1} MATCHES "GITDIR-NOTFOUND")
message(STATUS "Failed to read git_sha1")
set(GIT_SHA1 "")
if(EXISTS "${LEAN_SOURCE_DIR}/bin/package_version")
file(STRINGS "${LEAN_SOURCE_DIR}/bin/package_version" LEAN_PACKAGE_VERSION)
message(STATUS "Package version detected: ${LEAN_PACKAGE_VERSION}")
endif()
else()
message(STATUS "git commit sha1: ${GIT_SHA1}")
endif()
else()
set(GIT_SHA1 "")
if(EXISTS "${LEAN_SOURCE_DIR}/bin/package_version")
file(STRINGS "${LEAN_SOURCE_DIR}/bin/package_version" LEAN_PACKAGE_VERSION)
message(STATUS "Package version detected: ${LEAN_PACKAGE_VERSION}")
endif()
endif()
configure_file("${LEAN_SOURCE_DIR}/githash.h.in" "${LEAN_BINARY_DIR}/githash.h")
@@ -439,13 +447,12 @@ include_directories(${LEAN_SOURCE_DIR})
include_directories(${CMAKE_BINARY_DIR}) # version.h etc., "private" headers
include_directories(${CMAKE_BINARY_DIR}/include) # config.h etc., "public" headers
# Use CMake profile C++ flags for building Lean libraries, but do not embed in `leanc`
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
# These are used in lean.mk (and libleanrt) and passed through by stdlib.make
# They are not embedded into `leanc` since they are build profile/machine specific
string(APPEND LEANC_OPTS " ${CMAKE_CXX_FLAGS_${uppercase_CMAKE_BUILD_TYPE}}")
# Do embed flag for finding system libraries in dev builds
if(CMAKE_OSX_SYSROOT AND NOT LEAN_STANDALONE)
string(APPEND LEANC_EXTRA_FLAGS " ${CMAKE_CXX_SYSROOT_FLAG}${CMAKE_OSX_SYSROOT}")
string(APPEND LEANC_OPTS " ${CMAKE_CXX_SYSROOT_FLAG}${CMAKE_OSX_SYSROOT}")
endif()
if(${STAGE} GREATER 1)

View File

@@ -17,7 +17,6 @@ import Init.System
import Init.Util
import Init.Dynamic
import Init.ShareCommon
import Init.MetaTypes
import Init.Meta
import Init.NotationExtra
import Init.SimpLemmas

View File

@@ -468,9 +468,6 @@ def elem [BEq α] (a : α) (as : Array α) : Bool :=
else
(true, r)
/-- Convert a `Array α` into an `List α`. This is O(n) in the size of the array. -/
-- This function is exported to C, where it is called by `Array.data`
-- (the projection) to implement this functionality.
@[export lean_array_to_list]
def toList (as : Array α) : List α :=
as.foldr List.cons []

View File

@@ -1,7 +1,7 @@
/-
Copyright (c) 2022 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Joachim Breitner
Authors: Leonardo de Moura
-/
prelude
import Init.Data.Array.Basic
@@ -20,26 +20,32 @@ theorem List.sizeOf_get_lt [SizeOf α] (as : List α) (i : Fin as.length) : size
namespace Array
/-- `a ∈ as` is a predicate which asserts that `a` is in the array `as`. -/
-- NB: This is defined as a structure rather than a plain def so that a lemma
-- like `sizeOf_lt_of_mem` will not apply with no actual arrays around.
structure Mem (a : α) (as : Array α) : Prop where
val : a as.data
instance : Membership α (Array α) where
mem a as := Mem a as
instance [DecidableEq α] : Membership α (Array α) where
mem a as := as.contains a
theorem sizeOf_get_lt [SizeOf α] (as : Array α) (i : Fin as.size) : sizeOf (as.get i) < sizeOf as := by
cases as with | _ as =>
exact Nat.lt_trans (List.sizeOf_get_lt as i) (by simp_arith)
cases as; rename_i as
simp [get]
have ih := List.sizeOf_get_lt as i
exact Nat.lt_trans ih (by simp_arith)
theorem sizeOf_lt_of_mem [SizeOf α] {as : Array α} (h : a as) : sizeOf a < sizeOf as := by
cases as with | _ as =>
exact Nat.lt_trans (List.sizeOf_lt_of_mem h.val) (by simp_arith)
theorem sizeOf_lt_of_mem [DecidableEq α] [SizeOf α] {as : Array α} (h : a as) : sizeOf a < sizeOf as := by
simp [Membership.mem, contains, any, Id.run, BEq.beq, anyM] at h
let rec aux (j : Nat) (h : anyM.loop (m := Id) (fun b => decide (a = b)) as as.size (Nat.le_refl ..) j = true) : sizeOf a < sizeOf as := by
unfold anyM.loop at h
split at h
· simp [Bind.bind, pure] at h; split at h
next he => subst a; apply sizeOf_get_lt
next => have ih := aux (j+1) h; assumption
· contradiction
apply aux 0 h
termination_by aux j _ => as.size - j
@[simp] theorem sizeOf_get [SizeOf α] (as : Array α) (i : Fin as.size) : sizeOf (as.get i) < sizeOf as := by
cases as with | _ as =>
exact Nat.lt_trans (List.sizeOf_get ..) (by simp_arith)
cases as
simp [get]
apply Nat.lt_trans (List.sizeOf_get ..)
simp_arith
/-- This tactic, added to the `decreasing_trivial` toolbox, proves that
`sizeOf arr[i] < sizeOf arr`, which is useful for well founded recursions
@@ -51,17 +57,4 @@ macro "array_get_dec" : tactic =>
macro_rules | `(tactic| decreasing_trivial) => `(tactic| array_get_dec)
/-- This tactic, added to the `decreasing_trivial` toolbox, proves that `sizeOf a < sizeOf arr`
provided that `a ∈ arr` which is useful for well founded recursions over a nested inductive like
`inductive T | mk : Array T → T`. -/
-- NB: This is analogue to tactic `sizeOf_list_dec`
macro "array_mem_dec" : tactic =>
`(tactic| first
| apply Array.sizeOf_lt_of_mem; assumption; done
| apply Nat.lt_trans (Array.sizeOf_lt_of_mem ?h)
case' h => assumption
simp_arith)
macro_rules | `(tactic| decreasing_trivial) => `(tactic| array_mem_dec)
end Array

View File

@@ -45,19 +45,19 @@ protected def sub : Fin n → Fin n → Fin n
| a, h, b, _ => (a + (n - b)) % n, mlt h
/-!
Remark: land/lor can be defined without using (% n), but
Remark: mod/div/modn/land/lor can be defined without using (% n), but
we are trying to minimize the number of Nat theorems
needed to bootstrap Lean.
-/
protected def mod : Fin n Fin n Fin n
| a, h, b, _ => a % b, Nat.lt_of_le_of_lt (Nat.mod_le _ _) h
| a, h, b, _ => (a % b) % n, mlt h
protected def div : Fin n Fin n Fin n
| a, h, b, _ => a / b, Nat.lt_of_le_of_lt (Nat.div_le_self _ _) h
| a, h, b, _ => (a / b) % n, mlt h
def modn : Fin n Nat Fin n
| a, h, m => a % m, Nat.lt_of_le_of_lt (Nat.mod_le _ _) h
| a, h, m => (a % m) % n, mlt h
def land : Fin n Fin n Fin n
| a, h, b, _ => (Nat.land a b) % n, mlt h
@@ -110,7 +110,7 @@ theorem val_ne_of_ne {i j : Fin n} (h : i ≠ j) : val i ≠ val j :=
fun h' => absurd (eq_of_val_eq h') h
theorem modn_lt : {m : Nat} (i : Fin n), m > 0 (modn i m).val < m
| _, _, _, hp => by simp [modn]; apply Nat.mod_lt; assumption
| _, _, _, hp => Nat.lt_of_le_of_lt (mod_le _ _) (mod_lt _ hp)
theorem val_lt_of_le (i : Fin b) (h : b n) : i.val < n :=
Nat.lt_of_lt_of_le i.isLt h

View File

@@ -132,7 +132,7 @@ instance : ReprAtom Float := ⟨⟩
@[extern "round"] opaque Float.round : Float Float
@[extern "fabs"] opaque Float.abs : Float Float
instance : HomogeneousPow Float := Float.pow
instance : Pow Float Float := Float.pow
instance : Min Float := minOfLe

View File

@@ -6,48 +6,9 @@ Author: Leonardo de Moura
prelude
import Init.SimpLemmas
import Init.Data.Nat.Basic
import Init.Data.Nat.Div
set_option linter.missingDocs true -- keep it documented
open Decidable List
/--
The syntax `[a, b, c]` is shorthand for `a :: b :: c :: []`, or
`List.cons a (List.cons b (List.cons c List.nil))`. It allows conveniently constructing
list literals.
For lists of length at least 64, an alternative desugaring strategy is used
which uses let bindings as intermediates as in
`let left := [d, e, f]; a :: b :: c :: left` to avoid creating very deep expressions.
Note that this changes the order of evaluation, although it should not be observable
unless you use side effecting operations like `dbg_trace`.
-/
syntax "[" withoutPosition(term,*,?) "]" : term
/--
Auxiliary syntax for implementing `[$elem,*]` list literal syntax.
The syntax `%[a,b,c|tail]` constructs a value equivalent to `a::b::c::tail`.
It uses binary partitioning to construct a tree of intermediate let bindings as in
`let left := [d, e, f]; a :: b :: c :: left` to avoid creating very deep expressions.
-/
syntax "%[" withoutPosition(term,*,? " | " term) "]" : term
namespace Lean
macro_rules
| `([ $elems,* ]) => do
-- NOTE: we do not have `TSepArray.getElems` yet at this point
let rec expandListLit (i : Nat) (skip : Bool) (result : TSyntax `term) : MacroM Syntax := do
match i, skip with
| 0, _ => pure result
| i+1, true => expandListLit i false result
| i+1, false => expandListLit i true ( ``(List.cons $(elems.elemsAndSeps.get! i) $result))
let size := elems.elemsAndSeps.size
if size < 64 then
expandListLit size (size % 2 == 0) ( ``(List.nil))
else
`(%[ $elems,* | List.nil ])
end Lean
universe u v w
variable {α : Type u} {β : Type v} {γ : Type w}
@@ -395,7 +356,7 @@ inductive Mem (a : α) : List α → Prop
instance : Membership α (List α) where
mem := Mem
theorem mem_of_elem_eq_true [BEq α] [LawfulBEq α] {a : α} {as : List α} : elem a as = true a as := by
theorem mem_of_elem_eq_true [DecidableEq α] {a : α} {as : List α} : elem a as = true a as := by
match as with
| [] => simp [elem]
| a'::as =>
@@ -404,12 +365,12 @@ theorem mem_of_elem_eq_true [BEq α] [LawfulBEq α] {a : α} {as : List α} : el
next h => intros; simp [BEq.beq] at h; subst h; apply Mem.head
next _ => intro h; exact Mem.tail _ (mem_of_elem_eq_true h)
theorem elem_eq_true_of_mem [BEq α] [LawfulBEq α] {a : α} {as : List α} (h : a as) : elem a as = true := by
theorem elem_eq_true_of_mem [DecidableEq α] {a : α} {as : List α} (h : a as) : elem a as = true := by
induction h with
| head _ => simp [elem]
| tail _ _ ih => simp [elem]; split; rfl; assumption
instance [BEq α] [LawfulBEq α] (a : α) (as : List α) : Decidable (a as) :=
instance [DecidableEq α] (a : α) (as : List α) : Decidable (a as) :=
decidable_of_decidable_of_iff (Iff.intro mem_of_elem_eq_true elem_eq_true_of_mem)
theorem mem_append_of_mem_left {a : α} {as : List α} (bs : List α) : a as a as ++ bs := by

View File

@@ -629,7 +629,7 @@ protected theorem sub_lt_sub_left : ∀ {k m n : Nat}, k < m → k < n → m - n
@[simp] protected theorem zero_sub (n : Nat) : 0 - n = 0 := by
induction n with
| zero => rfl
| succ n ih => simp only [ih, Nat.sub_succ]; decide
| succ n ih => simp [ih, Nat.sub_succ]
protected theorem sub_self_add (n m : Nat) : n - (n + m) = 0 := by
show (n + 0) - (n + m) = 0

View File

@@ -285,7 +285,7 @@ instance : Max UInt64 := maxOfLe
instance : Min UInt64 := minOfLe
theorem usize_size_gt_zero : USize.size > 0 :=
Nat.zero_lt_succ ..
Nat.pos_pow_of_pos System.Platform.numBits (Nat.zero_lt_succ _)
@[extern "lean_usize_of_nat"]
def USize.ofNat (n : @& Nat) : USize := Fin.ofNat' n usize_size_gt_zero

View File

@@ -6,7 +6,6 @@ Authors: Leonardo de Moura and Sebastian Ullrich
Additional goodies for writing macros
-/
prelude
import Init.MetaTypes
import Init.Data.Array.Basic
import Init.Data.Option.BasicAux
@@ -65,15 +64,6 @@ def toolchain :=
@[extern "lean_internal_is_stage0"]
opaque Internal.isStage0 (u : Unit) : Bool
/--
This function can be used to detect whether the compiler has support for
generating LLVM instead of C. It is used by lake instead of the --features
flag in order to avoid having to run a compiler for this every time on startup.
See #2572.
-/
@[extern "lean_internal_has_llvm_backend"]
opaque Internal.hasLLVMBackend (u : Unit) : Bool
/-- Valid identifier names -/
def isGreek (c : Char) : Bool :=
0x391 c.val && c.val 0x3dd
@@ -227,6 +217,11 @@ instance : DecidableEq Name :=
end Name
structure NameGenerator where
namePrefix : Name := `_uniq
idx : Nat := 1
deriving Inhabited
namespace NameGenerator
@[inline] def curr (g : NameGenerator) : Name :=
@@ -457,6 +452,11 @@ end Syntax
| none => x
| some ref => withRef ref x
/-- Syntax objects for a Lean module. -/
structure Module where
header : Syntax
commands : Array Syntax
/--
Expand macros in the given syntax.
A node with kind `k` is visited only if `p k` is true.
@@ -1203,7 +1203,88 @@ end TSyntax
namespace Meta
deriving instance Repr for TransparencyMode, EtaStructMode, DSimp.Config, Simp.Config
inductive TransparencyMode where
| all | default | reducible | instances
deriving Inhabited, BEq, Repr
inductive EtaStructMode where
/-- Enable eta for structure and classes. -/
| all
/-- Enable eta only for structures that are not classes. -/
| notClasses
/-- Disable eta for structures and classes. -/
| none
deriving Inhabited, BEq, Repr
namespace DSimp
structure Config where
zeta : Bool := true
beta : Bool := true
eta : Bool := true
etaStruct : EtaStructMode := .all
iota : Bool := true
proj : Bool := true
decide : Bool := false
autoUnfold : Bool := false
/-- If `failIfUnchanged := true`, then calls to `simp`, `dsimp`, or `simp_all`
will fail if they do not make progress. -/
failIfUnchanged : Bool := true
deriving Inhabited, BEq, Repr
end DSimp
namespace Simp
def defaultMaxSteps := 100000
structure Config where
maxSteps : Nat := defaultMaxSteps
maxDischargeDepth : Nat := 2
contextual : Bool := false
memoize : Bool := true
singlePass : Bool := false
zeta : Bool := true
beta : Bool := true
eta : Bool := true
etaStruct : EtaStructMode := .all
iota : Bool := true
proj : Bool := true
decide : Bool := true
arith : Bool := false
autoUnfold : Bool := false
/--
If `dsimp := true`, then switches to `dsimp` on dependent arguments where there is no congruence theorem that allows
`simp` to visit them. If `dsimp := false`, then argument is not visited.
-/
dsimp : Bool := true
/-- If `failIfUnchanged := true`, then calls to `simp`, `dsimp`, or `simp_all`
will fail if they do not make progress. -/
failIfUnchanged : Bool := true
deriving Inhabited, BEq, Repr
-- Configuration object for `simp_all`
structure ConfigCtx extends Config where
contextual := true
def neutralConfig : Simp.Config := {
zeta := false
beta := false
eta := false
iota := false
proj := false
decide := false
arith := false
autoUnfold := false
}
end Simp
inductive Occurrences where
| all
| pos (idxs : List Nat)
| neg (idxs : List Nat)
deriving Inhabited, BEq
def Occurrences.contains : Occurrences → Nat → Bool
| all, _ => true
@@ -1261,14 +1342,14 @@ This will rewrite with all equation lemmas, which can be used to
partially evaluate many definitions. -/
declare_simp_like_tactic simpAutoUnfold "simp! " fun (c : Lean.Meta.Simp.Config) => { c with autoUnfold := true }
/-- `simp_arith` is shorthand for `simp` with `arith := true` and `decide := true`.
/-- `simp_arith` is shorthand for `simp` with `arith := true`.
This enables the use of normalization by linear arithmetic. -/
declare_simp_like_tactic simpArith "simp_arith " fun (c : Lean.Meta.Simp.Config) => { c with arith := true, decide := true }
declare_simp_like_tactic simpArith "simp_arith " fun (c : Lean.Meta.Simp.Config) => { c with arith := true }
/-- `simp_arith!` is shorthand for `simp_arith` with `autoUnfold := true`.
This will rewrite with all equation lemmas, which can be used to
partially evaluate many definitions. -/
declare_simp_like_tactic simpArithAutoUnfold "simp_arith! " fun (c : Lean.Meta.Simp.Config) => { c with arith := true, autoUnfold := true, decide := true }
declare_simp_like_tactic simpArithAutoUnfold "simp_arith! " fun (c : Lean.Meta.Simp.Config) => { c with arith := true, autoUnfold := true }
/-- `simp_all!` is shorthand for `simp_all` with `autoUnfold := true`.
This will rewrite with all equation lemmas, which can be used to
@@ -1276,10 +1357,10 @@ partially evaluate many definitions. -/
declare_simp_like_tactic (all := true) simpAllAutoUnfold "simp_all! " fun (c : Lean.Meta.Simp.ConfigCtx) => { c with autoUnfold := true }
/-- `simp_all_arith` combines the effects of `simp_all` and `simp_arith`. -/
declare_simp_like_tactic (all := true) simpAllArith "simp_all_arith " fun (c : Lean.Meta.Simp.ConfigCtx) => { c with arith := true, decide := true }
declare_simp_like_tactic (all := true) simpAllArith "simp_all_arith " fun (c : Lean.Meta.Simp.ConfigCtx) => { c with arith := true }
/-- `simp_all_arith!` combines the effects of `simp_all`, `simp_arith` and `simp!`. -/
declare_simp_like_tactic (all := true) simpAllArithAutoUnfold "simp_all_arith! " fun (c : Lean.Meta.Simp.ConfigCtx) => { c with arith := true, autoUnfold := true, decide := true }
declare_simp_like_tactic (all := true) simpAllArithAutoUnfold "simp_all_arith! " fun (c : Lean.Meta.Simp.ConfigCtx) => { c with arith := true, autoUnfold := true }
/-- `dsimp!` is shorthand for `dsimp` with `autoUnfold := true`.
This will rewrite with all equation lemmas, which can be used to

View File

@@ -1,117 +0,0 @@
/-
Copyright (c) Leonardo de Moura. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
prelude
import Init.Core
namespace Lean
structure NameGenerator where
namePrefix : Name := `_uniq
idx : Nat := 1
deriving Inhabited
/-- Syntax objects for a Lean module. -/
structure Module where
header : Syntax
commands : Array Syntax
namespace Meta
inductive TransparencyMode where
| all | default | reducible | instances
deriving Inhabited, BEq
inductive EtaStructMode where
/-- Enable eta for structure and classes. -/
| all
/-- Enable eta only for structures that are not classes. -/
| notClasses
/-- Disable eta for structures and classes. -/
| none
deriving Inhabited, BEq
namespace DSimp
structure Config where
zeta : Bool := true
beta : Bool := true
eta : Bool := true
etaStruct : EtaStructMode := .all
iota : Bool := true
proj : Bool := true
decide : Bool := false
autoUnfold : Bool := false
/-- If `failIfUnchanged := true`, then calls to `simp`, `dsimp`, or `simp_all`
will fail if they do not make progress. -/
failIfUnchanged : Bool := true
/-- If `unfoldPartialApp := true`, then calls to `simp`, `dsimp`, or `simp_all`
will unfold even partial applications of `f` when we request `f` to be unfolded. -/
unfoldPartialApp : Bool := false
deriving Inhabited, BEq
end DSimp
namespace Simp
def defaultMaxSteps := 100000
structure Config where
maxSteps : Nat := defaultMaxSteps
maxDischargeDepth : Nat := 2
contextual : Bool := false
memoize : Bool := true
singlePass : Bool := false
zeta : Bool := true
beta : Bool := true
eta : Bool := true
etaStruct : EtaStructMode := .all
iota : Bool := true
proj : Bool := true
decide : Bool := false
arith : Bool := false
autoUnfold : Bool := false
/--
If `dsimp := true`, then switches to `dsimp` on dependent arguments where there is no congruence theorem that allows
`simp` to visit them. If `dsimp := false`, then argument is not visited.
-/
dsimp : Bool := true
/-- If `failIfUnchanged := true`, then calls to `simp`, `dsimp`, or `simp_all`
will fail if they do not make progress. -/
failIfUnchanged : Bool := true
/-- If `ground := true`, then ground terms are reduced. A term is ground when
it does not contain free or meta variables. Reduction is interrupted at a function application `f ...`
if `f` is marked to not be unfolded. -/
ground : Bool := false
/-- If `unfoldPartialApp := true`, then calls to `simp`, `dsimp`, or `simp_all`
will unfold even partial applications of `f` when we request `f` to be unfolded. -/
unfoldPartialApp : Bool := false
deriving Inhabited, BEq
-- Configuration object for `simp_all`
structure ConfigCtx extends Config where
contextual := true
def neutralConfig : Simp.Config := {
zeta := false
beta := false
eta := false
iota := false
proj := false
decide := false
arith := false
autoUnfold := false
ground := false
}
end Simp
inductive Occurrences where
| all
| pos (idxs : List Nat)
| neg (idxs : List Nat)
deriving Inhabited, BEq
end Lean.Meta

View File

@@ -295,8 +295,7 @@ macro_rules | `($x - $y) => `(binop% HSub.hSub $x $y)
macro_rules | `($x * $y) => `(binop% HMul.hMul $x $y)
macro_rules | `($x / $y) => `(binop% HDiv.hDiv $x $y)
macro_rules | `($x % $y) => `(binop% HMod.hMod $x $y)
-- exponentiation should be considered a right action (#2220)
macro_rules | `($x ^ $y) => `(rightact% HPow.hPow $x $y)
macro_rules | `($x ^ $y) => `(binop% HPow.hPow $x $y)
macro_rules | `($x ++ $y) => `(binop% HAppend.hAppend $x $y)
macro_rules | `(- $x) => `(unop% Neg.neg $x)
@@ -461,8 +460,42 @@ expected type is known. So, `without_expected_type` is not effective in this cas
-/
macro "without_expected_type " x:term : term => `(let aux := $x; aux)
/--
The syntax `[a, b, c]` is shorthand for `a :: b :: c :: []`, or
`List.cons a (List.cons b (List.cons c List.nil))`. It allows conveniently constructing
list literals.
For lists of length at least 64, an alternative desugaring strategy is used
which uses let bindings as intermediates as in
`let left := [d, e, f]; a :: b :: c :: left` to avoid creating very deep expressions.
Note that this changes the order of evaluation, although it should not be observable
unless you use side effecting operations like `dbg_trace`.
-/
syntax "[" withoutPosition(term,*) "]" : term
/--
Auxiliary syntax for implementing `[$elem,*]` list literal syntax.
The syntax `%[a,b,c|tail]` constructs a value equivalent to `a::b::c::tail`.
It uses binary partitioning to construct a tree of intermediate let bindings as in
`let left := [d, e, f]; a :: b :: c :: left` to avoid creating very deep expressions.
-/
syntax "%[" withoutPosition(term,* " | " term) "]" : term
namespace Lean
macro_rules
| `([ $elems,* ]) => do
-- NOTE: we do not have `TSepArray.getElems` yet at this point
let rec expandListLit (i : Nat) (skip : Bool) (result : TSyntax `term) : MacroM Syntax := do
match i, skip with
| 0, _ => pure result
| i+1, true => expandListLit i false result
| i+1, false => expandListLit i true ( ``(List.cons $(elems.elemsAndSeps.get! i) $result))
if elems.elemsAndSeps.size < 64 then
expandListLit elems.elemsAndSeps.size false ( ``(List.nil))
else
`(%[ $elems,* | List.nil ])
/--
Category for carrying raw syntax trees between macros; any content is printed as is by the pretty printer.
The only accepted parser for this category is an antiquotation.

View File

@@ -806,12 +806,6 @@ decidability instance instead of the proposition, which has no code).
If a proposition `p` is `Decidable`, then `(by decide : p)` will prove it by
evaluating the decidability instance to `isTrue h` and returning `h`.
Because `Decidable` carries data,
when writing `@[simp]` lemmas which include a `Decidable` instance on the LHS,
it is best to use `{_ : Decidable p}` rather than `[Decidable p]`
so that non-canonical instances can be found via unification rather than
typeclass search.
-/
class inductive Decidable (p : Prop) where
/-- Prove that `p` is decidable by supplying a proof of `¬p` -/
@@ -926,9 +920,7 @@ or derive `i < arr.size` from some other proposition that we are checking in the
return `t` or `e` depending on whether `c` is true or false. The explicit argument
`c : Prop` does not have any actual computational content, but there is an additional
`[Decidable c]` argument synthesized by typeclass inference which actually
determines how to evaluate `c` to true or false. Write `if h : c then t else e`
instead for a "dependent if-then-else" `dite`, which allows `t`/`e` to use the fact
that `c` is true/false.
determines how to evaluate `c` to true or false.
Because lean uses a strict (call-by-value) evaluation strategy, the signature of this
function is problematic in that it would require `t` and `e` to be evaluated before
@@ -1305,38 +1297,11 @@ class Mod (α : Type u) where
The homogeneous version of `HPow`: `a ^ b : α` where `a : α`, `b : β`.
(The right argument is not the same as the left since we often want this even
in the homogeneous case.)
Types can choose to subscribe to particular defaulting behavior by providing
an instance to either `NatPow` or `HomogeneousPow`:
- `NatPow` is for types whose exponents is preferentially a `Nat`.
- `HomogeneousPow` is for types whose base and exponent are preferentially the same.
-/
class Pow (α : Type u) (β : Type v) where
/-- `a ^ b` computes `a` to the power of `b`. See `HPow`. -/
pow : α β α
/-- The homogenous version of `Pow` where the exponent is a `Nat`.
The purpose of this class is that it provides a default `Pow` instance,
which can be used to specialize the exponent to `Nat` during elaboration.
For example, if `x ^ 2` should preferentially elaborate with `2 : Nat` then `x`'s type should
provide an instance for this class. -/
class NatPow (α : Type u) where
/-- `a ^ n` computes `a` to the power of `n` where `n : Nat`. See `Pow`. -/
protected pow : α Nat α
/-- The completely homogeneous version of `Pow` where the exponent has the same type as the base.
The purpose of this class is that it provides a default `Pow` instance,
which can be used to specialize the exponent to have the same type as the base's type during elaboration.
This is to say, a type should provide an instance for this class in case `x ^ y` should be elaborated
with both `x` and `y` having the same type.
For example, the `Float` type provides an instance of this class, which causes expressions
such as `(2.2 ^ 2.2 : Float)` to elaborate. -/
class HomogeneousPow (α : Type u) where
/-- `a ^ b` computes `a` to the power of `b` where `a` and `b` both have the same type. -/
protected pow : α α α
/-- The homogeneous version of `HAppend`: `a ++ b : α` where `a b : α`. -/
class Append (α : Type u) where
/-- `a ++ b` is the result of concatenation of `a` and `b`. See `HAppend`. -/
@@ -1420,14 +1385,6 @@ instance [Mod α] : HMod α α α where
instance [Pow α β] : HPow α β α where
hPow a b := Pow.pow a b
@[default_instance]
instance [NatPow α] : Pow α Nat where
pow a n := NatPow.pow a n
@[default_instance]
instance [HomogeneousPow α] : Pow α α where
pow a b := HomogeneousPow.pow a b
@[default_instance]
instance [Append α] : HAppend α α α where
hAppend a b := Append.append a b
@@ -1523,7 +1480,8 @@ protected def Nat.pow (m : @& Nat) : (@& Nat) → Nat
| 0 => 1
| succ n => Nat.mul (Nat.pow m n) m
instance : NatPow Nat := Nat.pow
instance : Pow Nat Nat where
pow := Nat.pow
set_option bootstrap.genMatcherCode false in
/--
@@ -1816,7 +1774,7 @@ instance Fin.decLt {n} (a b : Fin n) : Decidable (LT.lt a b) := Nat.decLt ..
instance Fin.decLe {n} (a b : Fin n) : Decidable (LE.le a b) := Nat.decLe ..
/-- The size of type `UInt8`, that is, `2^8 = 256`. -/
abbrev UInt8.size : Nat := 256
def UInt8.size : Nat := 256
/--
The type of unsigned 8-bit integers. This type has special support in the
@@ -1855,7 +1813,7 @@ instance : Inhabited UInt8 where
default := UInt8.ofNatCore 0 (by decide)
/-- The size of type `UInt16`, that is, `2^16 = 65536`. -/
abbrev UInt16.size : Nat := 65536
def UInt16.size : Nat := 65536
/--
The type of unsigned 16-bit integers. This type has special support in the
@@ -1894,7 +1852,7 @@ instance : Inhabited UInt16 where
default := UInt16.ofNatCore 0 (by decide)
/-- The size of type `UInt32`, that is, `2^32 = 4294967296`. -/
abbrev UInt32.size : Nat := 4294967296
def UInt32.size : Nat := 4294967296
/--
The type of unsigned 32-bit integers. This type has special support in the
@@ -1971,7 +1929,7 @@ instance : Max UInt32 := maxOfLe
instance : Min UInt32 := minOfLe
/-- The size of type `UInt64`, that is, `2^64 = 18446744073709551616`. -/
abbrev UInt64.size : Nat := 18446744073709551616
def UInt64.size : Nat := 18446744073709551616
/--
The type of unsigned 64-bit integers. This type has special support in the
compiler to make it actually 64 bits rather than wrapping a `Nat`.
@@ -2011,26 +1969,11 @@ instance : Inhabited UInt64 where
/--
The size of type `UInt16`, that is, `2^System.Platform.numBits`, which may
be either `2^32` or `2^64` depending on the platform's architecture.
Remark: we define `USize.size` using `(2^numBits - 1) + 1` to ensure the
Lean unifier can solve contraints such as `?m + 1 = USize.size`. Recall that
`numBits` does not reduce to a numeral in the Lean kernel since it is platform
specific. Without this trick, the following definition would be rejected by the
Lean type checker.
```
def one: Fin USize.size := 1
```
Because Lean would fail to synthesize instance `OfNat (Fin USize.size) 1`.
Recall that the `OfNat` instance for `Fin` is
```
instance : OfNat (Fin (n+1)) i where
ofNat := Fin.ofNat i
```
-/
abbrev USize.size : Nat := Nat.succ (Nat.sub (hPow 2 System.Platform.numBits) 1)
def USize.size : Nat := hPow 2 System.Platform.numBits
theorem usize_size_eq : Or (Eq USize.size 4294967296) (Eq USize.size 18446744073709551616) :=
show Or (Eq (Nat.succ (Nat.sub (hPow 2 System.Platform.numBits) 1)) 4294967296) (Eq (Nat.succ (Nat.sub (hPow 2 System.Platform.numBits) 1)) 18446744073709551616) from
show Or (Eq (hPow 2 System.Platform.numBits) 4294967296) (Eq (hPow 2 System.Platform.numBits) 18446744073709551616) from
match System.Platform.numBits, System.Platform.numBits_eq with
| _, Or.inl rfl => Or.inl (by decide)
| _, Or.inr rfl => Or.inr (by decide)
@@ -2548,22 +2491,13 @@ is not observable from lean code. Arrays perform best when unshared; as long
as they are used "linearly" all updates will be performed destructively on the
array, so it has comparable performance to mutable arrays in imperative
programming languages.
From the point of view of proofs `Array α` is just a wrapper around `List α`.
-/
structure Array (α : Type u) where
/--
Converts a `List α` into an `Array α`.
At runtime, this constructor is implemented by `List.toArray` and is O(n) in the length of the
list.
-/
/-- Convert a `List α` into an `Array α`. This function is overridden
to `List.toArray` and is O(n) in the length of the list. -/
mk ::
/--
Converts a `Array α` into an `List α`.
At runtime, this projection is implemented by `Array.toList` and is O(n) in the length of the
array. -/
/-- Convert an `Array α` into a `List α`. This function is overridden
to `Array.toList` and is O(n) in the length of the list. -/
data : List α
attribute [extern "lean_array_data"] Array.data
@@ -2711,9 +2645,12 @@ def List.redLength : List α → Nat
| nil => 0
| cons _ as => as.redLength.succ
/-- Convert a `List α` into an `Array α`. This is O(n) in the length of the list. -/
-- This function is exported to C, where it is called by `Array.mk`
-- (the constructor) to implement this functionality.
/--
Convert a `List α` into an `Array α`. This is O(n) in the length of the list.
This function is exported to C, where it is called by `Array.mk`
(the constructor) to implement this functionality.
-/
@[inline, match_pattern, export lean_list_to_array]
def List.toArray (as : List α) : Array α :=
as.toArrayAux (Array.mkEmpty as.redLength)

View File

@@ -41,12 +41,6 @@ theorem implies_congr_ctx {p₁ p₂ q₁ q₂ : Prop} (h₁ : p₁ = p₂) (h
theorem forall_congr {α : Sort u} {p q : α Prop} (h : a, p a = q a) : ( a, p a) = ( a, q a) :=
(funext h : p = q) rfl
theorem forall_prop_domain_congr {p₁ p₂ : Prop} {q₁ : p₁ Prop} {q₂ : p₂ Prop}
(h₁ : p₁ = p₂)
(h₂ : a : p₂, q₁ (h₁.substr a) = q₂ a)
: ( a : p₁, q₁ a) = ( a : p₂, q₂ a) := by
subst h₁; simp [ h₂]
theorem let_congr {α : Sort u} {β : Sort v} {a a' : α} {b b' : α β}
(h₁ : a = a') (h₂ : x, b x = b' x) : (let x := a; b x) = (let x := a'; b' x) :=
h₁ (funext h₂ : b = b') rfl
@@ -104,7 +98,6 @@ theorem dite_congr {_ : Decidable b} [Decidable c]
@[simp] theorem implies_true (α : Sort u) : (α True) = True := eq_true fun _ => trivial
@[simp] theorem true_implies (p : Prop) : (True p) = p := propext (· trivial), (fun _ => ·)
@[simp] theorem not_false_eq_true : (¬ False) = True := eq_true False.elim
@[simp] theorem not_true_eq_false : (¬ True) = False := by decide
@[simp] theorem Bool.or_false (b : Bool) : (b || false) = b := by cases b <;> rfl
@[simp] theorem Bool.or_true (b : Bool) : (b || true) = true := by cases b <;> rfl
@@ -143,9 +136,9 @@ theorem Bool.or_assoc (a b c : Bool) : (a || b || c) = (a || (b || c)) := by
@[simp] theorem Bool.not_eq_true (b : Bool) : (¬(b = true)) = (b = false) := by cases b <;> decide
@[simp] theorem Bool.not_eq_false (b : Bool) : (¬(b = false)) = (b = true) := by cases b <;> decide
@[simp] theorem decide_eq_true_eq {_ : Decidable p} : (decide p = true) = p := propext <| Iff.intro of_decide_eq_true decide_eq_true
@[simp] theorem decide_not {h : Decidable p} : decide (¬ p) = !decide p := by cases h <;> rfl
@[simp] theorem not_decide_eq_true {h : Decidable p} : ((!decide p) = true) = ¬ p := by cases h <;> simp [decide, *]
@[simp] theorem decide_eq_true_eq [Decidable p] : (decide p = true) = p := propext <| Iff.intro of_decide_eq_true decide_eq_true
@[simp] theorem decide_not [h : Decidable p] : decide (¬ p) = !decide p := by cases h <;> rfl
@[simp] theorem not_decide_eq_true [h : Decidable p] : ((!decide p) = true) = ¬ p := by cases h <;> simp [decide, *]
@[simp] theorem heq_eq_eq {α : Sort u} (a b : α) : HEq a b = (a = b) := propext <| Iff.intro eq_of_heq heq_of_eq
@@ -159,7 +152,7 @@ theorem Bool.or_assoc (a b c : Bool) : (a || b || c) = (a || (b || c)) := by
@[simp] theorem bne_self_eq_false' [DecidableEq α] (a : α) : (a != a) = false := by simp [bne]
@[simp] theorem Nat.le_zero_eq (a : Nat) : (a 0) = (a = 0) :=
propext fun h => Nat.le_antisymm h (Nat.zero_le ..), fun h => by rw [h]; decide
propext fun h => Nat.le_antisymm h (Nat.zero_le ..), fun h => by simp [h]
@[simp] theorem decide_False : decide False = false := rfl
@[simp] theorem decide_True : decide True = true := rfl

View File

@@ -328,41 +328,7 @@ namespace FS
namespace Handle
@[extern "lean_io_prim_handle_mk"] opaque mk (fn : @& FilePath) (mode : FS.Mode) : IO Handle
/--
Acquires an exclusive or shared lock on the handle.
Will block to wait for the lock if necessary.
**NOTE:** Acquiring a exclusive lock while already possessing a shared lock
will NOT reliably succeed (i.e., it works on Unix but not on Windows).
-/
@[extern "lean_io_prim_handle_lock"] opaque lock (h : @& Handle) (exclusive := true) : IO Unit
/--
Tries to acquire an exclusive or shared lock on the handle.
Will NOT block for the lock, but instead return `false`.
**NOTE:** Acquiring a exclusive lock while already possessing a shared lock
will NOT reliably succeed (i.e., it works on Unix but not on Windows).
-/
@[extern "lean_io_prim_handle_try_lock"] opaque tryLock (h : @& Handle) (exclusive := true) : IO Bool
/--
Releases any previously acquired lock on the handle.
Will succeed even if no lock has been acquired.
-/
@[extern "lean_io_prim_handle_unlock"] opaque unlock (h : @& Handle) : IO Unit
@[extern "lean_io_prim_handle_flush"] opaque flush (h : @& Handle) : IO Unit
/-- Rewinds the read/write cursor to the beginning of the handle. -/
@[extern "lean_io_prim_handle_rewind"] opaque rewind (h : @& Handle) : IO Unit
/--
Truncates the handle to the read/write cursor.
Does not automatically flush. Usually this is fine because the read/write
cursor includes buffered writes. However, the combination of buffered writes,
then `rewind`, then `truncate`, then close may lead to a file with content.
If unsure, flush before truncating.
-/
@[extern "lean_io_prim_handle_truncate"] opaque truncate (h : @& Handle) : IO Unit
/--
Read up to the given number of bytes from the handle.
If the returned array is empty, an end-of-file marker has been reached.

View File

@@ -250,7 +250,7 @@ macro:1 x:tactic tk:" <;> " y:tactic:2 : tactic => `(tactic|
all_goals $y:tactic)
/-- `eq_refl` is equivalent to `exact rfl`, but has a few optimizations. -/
syntax (name := eqRefl) "eq_refl" : tactic
syntax (name := refl) "eq_refl" : tactic
/--
`rfl` tries to close the current goal using reflexivity.
@@ -435,14 +435,14 @@ non-dependent hypotheses. It has many variants:
other hypotheses.
-/
syntax (name := simp) "simp" (config)? (discharger)? (&" only")?
(" [" withoutPosition((simpStar <|> simpErase <|> simpLemma),*,?) "]")? (location)? : tactic
(" [" withoutPosition((simpStar <|> simpErase <|> simpLemma),*) "]")? (location)? : tactic
/--
`simp_all` is a stronger version of `simp [*] at *` where the hypotheses and target
are simplified multiple times until no simplification is applicable.
Only non-dependent propositional hypotheses are considered.
-/
syntax (name := simpAll) "simp_all" (config)? (discharger)? (&" only")?
(" [" withoutPosition((simpErase <|> simpLemma),*,?) "]")? : tactic
(" [" withoutPosition((simpErase <|> simpLemma),*) "]")? : tactic
/--
The `dsimp` tactic is the definitional simplifier. It is similar to `simp` but only
@@ -450,7 +450,7 @@ applies theorems that hold by reflexivity. Thus, the result is guaranteed to be
definitionally equal to the input.
-/
syntax (name := dsimp) "dsimp" (config)? (discharger)? (&" only")?
(" [" withoutPosition((simpErase <|> simpLemma),*,?) "]")? (location)? : tactic
(" [" withoutPosition((simpErase <|> simpLemma),*) "]")? (location)? : tactic
/--
`delta id1 id2 ...` delta-expands the definitions `id1`, `id2`, ....

View File

@@ -85,21 +85,3 @@ private def outOfBounds [Inhabited α] : α :=
macro:max x:term noWs "[" i:term "]" noWs "?" : term => `(getElem? $x $i)
macro:max x:term noWs "[" i:term "]" noWs "!" : term => `(getElem! $x $i)
/--
Marks given value and its object graph closure as multi-threaded if currently
marked single-threaded. This will make reference counter updates atomic and
thus more costly. It can still be useful to do eagerly when the value will be
shared between threads later anyway and there is available time budget to mark
it now. -/
@[extern "lean_runtime_mark_multi_threaded"]
def Runtime.markMultiThreaded (a : α) : α := a
/--
Marks given value and its object graph closure as persistent. This will remove
reference counter updates but prevent the closure from being deallocated until
the end of the process! It can still be useful to do eagerly when the value
will be marked persistent later anyway and there is available time budget to
mark it now or it would be unnecessarily marked multi-threaded in between. -/
@[extern "lean_runtime_mark_persistent"]
def Runtime.markPersistent (a : α) : α := a

View File

@@ -5,13 +5,12 @@ Author: Leonardo de Moura
-/
prelude
import Init.SizeOf
import Init.MetaTypes
import Init.WF
/-- Unfold definitions commonly used in well founded relation definitions.
This is primarily intended for internal use in `decreasing_tactic`. -/
macro "simp_wf" : tactic =>
`(tactic| try simp (config := { unfoldPartialApp := true }) [invImage, InvImage, Prod.lex, sizeOfWFRel, measure, Nat.lt_wfRel, WellFoundedRelation.rel])
`(tactic| try simp [invImage, InvImage, Prod.lex, sizeOfWFRel, measure, Nat.lt_wfRel, WellFoundedRelation.rel])
/-- Extensible helper tactic for `decreasing_tactic`. This handles the "base case"
reasoning after applying lexicographic order lemmas.

View File

@@ -472,7 +472,6 @@ def quoteString (s : String) : String :=
else if c == '\t' then "\\t"
else if c == '\\' then "\\\\"
else if c == '\"' then "\\\""
else if c == '?' then "\\?" -- avoid trigraphs
else if c.toNat <= 31 then
"\\x" ++ toHexDigit (c.toNat / 16) ++ toHexDigit (c.toNat % 16)
-- TODO(Leo): we should use `\unnnn` for escaping unicode characters.

View File

@@ -1541,6 +1541,15 @@ end EmitLLVM
def getLeanHBcPath : IO System.FilePath := do
return ( getLibDir ( getBuildDir)) / "lean.h.bc"
def optimizeLLVMModule (mod : LLVM.Module ctx) : IO Unit := do
let pm LLVM.createPassManager
let pmb LLVM.createPassManagerBuilder
pmb.setOptLevel 3
pmb.populateModulePassManager pm
LLVM.runPassManager pm mod
LLVM.disposePassManager pm
LLVM.disposePassManagerBuilder pmb
/-- Get the names of all global symbols in the module -/
partial def getModuleGlobals (mod : LLVM.Module llvmctx) : IO (Array (LLVM.Value llvmctx)) := do
let rec go (v : LLVM.Value llvmctx) (acc : Array (LLVM.Value llvmctx)) : IO (Array (LLVM.Value llvmctx)) := do
@@ -1559,7 +1568,7 @@ partial def getModuleFunctions (mod : LLVM.Module llvmctx) : IO (Array (LLVM.Val
`emitLLVM` is the entrypoint for the lean shell to code generate LLVM.
-/
@[export lean_ir_emit_llvm]
def emitLLVM (env : Environment) (modName : Name) (filepath : String) : IO Unit := do
def emitLLVM (env : Environment) (modName : Name) (filepath : String) (tripleStr? : Option String) : IO Unit := do
LLVM.llvmInitializeTargetInfo
let llvmctx LLVM.createContext
let module LLVM.createModule llvmctx modName.toString
@@ -1592,7 +1601,17 @@ def emitLLVM (env : Environment) (modName : Name) (filepath : String) : IO Unit
let some fn LLVM.getNamedFunction emitLLVMCtx.llvmmodule name
| throw <| IO.Error.userError s!"ERROR: linked module must have function from runtime module: '{name}'"
LLVM.setLinkage fn LLVM.Linkage.internal
optimizeLLVMModule emitLLVMCtx.llvmmodule
LLVM.writeBitcodeToFile emitLLVMCtx.llvmmodule filepath
let tripleStr := tripleStr?.getD ( LLVM.getDefaultTargetTriple)
let target LLVM.getTargetFromTriple tripleStr
let cpu := "generic"
let features := ""
let targetMachine LLVM.createTargetMachine target tripleStr cpu features
let codegenType := LLVM.CodegenFileType.ObjectFile
LLVM.targetMachineEmitToFile targetMachine emitLLVMCtx.llvmmodule (filepath ++ ".o") codegenType
LLVM.disposeModule emitLLVMCtx.llvmmodule
LLVM.disposeTargetMachine targetMachine
| .error err => throw (IO.Error.userError err)
end Lean.IR

View File

@@ -46,7 +46,7 @@ structure Pass where
Resulting phase.
-/
phaseOut : Phase := phase
phaseInv : phaseOut phase := by simp_arith
phaseInv : phaseOut phase := by simp
/--
The name of the `Pass`
-/

View File

@@ -65,12 +65,6 @@ structure Context where
initHeartbeats : Nat := 0
maxHeartbeats : Nat := getMaxHeartbeats options
currMacroScope : MacroScope := firstFrontendMacroScope
/--
If `catchRuntimeEx = false`, then given `try x catch ex => h ex`,
an runtime exception occurring in `x` is not handled by `h`.
Recall that runtime exceptions are `maxRecDepth` or `maxHeartbeats`.
-/
catchRuntimeEx : Bool := false
deriving Nonempty
/-- CoreM is a monad for manipulating the Lean environment.
@@ -356,42 +350,4 @@ def ImportM.runCoreM (x : CoreM α) : ImportM α := do
let (a, _) x.toIO { options := ctx.opts, fileName := "<ImportM>", fileMap := default } { env := ctx.env }
return a
/-- Return `true` if the exception was generated by one our resource limits. -/
def Exception.isRuntime (ex : Exception) : Bool :=
ex.isMaxHeartbeat || ex.isMaxRecDepth
/--
Custom `try-catch` for all monads based on `CoreM`. We don't want to catch "runtime exceptions"
in these monads, but on `CommandElabM`. See issues #2775 and #2744
-/
@[inline] protected def Core.tryCatch (x : CoreM α) (h : Exception CoreM α) : CoreM α := do
try
x
catch ex =>
if ex.isRuntime && !( read).catchRuntimeEx then
throw ex
else
h ex
instance : MonadExceptOf Exception CoreM where
throw := throw
tryCatch := Core.tryCatch
@[inline] def Core.withCatchingRuntimeEx (flag : Bool) (x : CoreM α) : CoreM α :=
withReader (fun ctx => { ctx with catchRuntimeEx := flag }) x
@[inline] def mapCoreM [MonadControlT CoreM m] [Monad m] (f : forall {α}, CoreM α CoreM α) {α} (x : m α) : m α :=
controlAt CoreM fun runInBase => f <| runInBase x
/--
Execute `x` with `catchRuntimeEx = flag`. That is, given `try x catch ex => h ex`,
if `x` throws a runtime exception, the handler `h` will be invoked if `flag = true`
Recall that
-/
@[inline] def withCatchingRuntimeEx [MonadControlT CoreM m] [Monad m] (x : m α) : m α :=
mapCoreM (Core.withCatchingRuntimeEx true) x
@[inline] def withoutCatchingRuntimeEx [MonadControlT CoreM m] [Monad m] (x : m α) : m α :=
mapCoreM (Core.withCatchingRuntimeEx false) x
end Lean

View File

@@ -305,7 +305,7 @@ section
throw $ userError s!"Expected method '{expectedMethod}', got method '{method}'"
| _ => throw $ userError s!"Expected JSON-RPC notification, got: '{(toJson m).compress}'"
def readResponseAs (h : FS.Stream) (nBytes : Nat) (expectedID : RequestID) (α) [FromJson α] : IO (Response α) := do
partial def readResponseAs (h : FS.Stream) (nBytes : Nat) (expectedID : RequestID) (α) [FromJson α] : IO (Response α) := do
let m h.readMessage nBytes
match m with
| Message.response id result =>
@@ -315,6 +315,7 @@ section
| Except.error inner => throw $ userError s!"Unexpected result '{result.compress}'\n{inner}"
else
throw $ userError s!"Expected id {expectedID}, got id {id}"
| Message.notification .. => readResponseAs h nBytes expectedID α
| _ => throw $ userError s!"Expected JSON-RPC response, got: '{(toJson m).compress}'"
end

View File

@@ -97,10 +97,6 @@ def insert : KVMap → Name → DataValue → KVMap
def contains (m : KVMap) (n : Name) : Bool :=
(m.find n).isSome
/-- Erase an entry from the map -/
def erase : KVMap Name KVMap
| m, k => m.filter fun a => a.1 k
def getString (m : KVMap) (k : Name) (defVal := "") : String :=
match m.find k with
| some (DataValue.ofString v) => v
@@ -149,30 +145,6 @@ def setName (m : KVMap) (k : Name) (v : Name) : KVMap :=
def setSyntax (m : KVMap) (k : Name) (v : Syntax) : KVMap :=
m.insert k (DataValue.ofSyntax v)
/-- Update a `String` entry based on its current value. -/
def updateString (m : KVMap) (k : Name) (f : String String) : KVMap :=
m.insert k <| DataValue.ofString <| f <| m.getString k
/-- Update a `Nat` entry based on its current value. -/
def updateNat (m : KVMap) (k : Name) (f : Nat Nat) : KVMap :=
m.insert k <| DataValue.ofNat <| f <| m.getNat k
/-- Update an `Int` entry based on its current value. -/
def updateInt (m : KVMap) (k : Name) (f : Int Int) : KVMap :=
m.insert k <| DataValue.ofInt <| f <| m.getInt k
/-- Update a `Bool` entry based on its current value. -/
def updateBool (m : KVMap) (k : Name) (f : Bool Bool) : KVMap :=
m.insert k <| DataValue.ofBool <| f <| m.getBool k
/-- Update a `Name` entry based on its current value. -/
def updateName (m : KVMap) (k : Name) (f : Name Name) : KVMap :=
m.insert k <| DataValue.ofName <| f <| m.getName k
/-- Update a `Syntax` entry based on its current value. -/
def updateSyntax (m : KVMap) (k : Name) (f : Syntax Syntax) : KVMap :=
m.insert k <| DataValue.ofSyntax <| f <| m.getSyntax k
@[inline] protected def forIn.{w, w'} {δ : Type w} {m : Type w Type w'} [Monad m]
(kv : KVMap) (init : δ) (f : Name × DataValue δ m (ForInStep δ)) : m δ :=
kv.entries.forIn init f
@@ -190,16 +162,6 @@ def subsetAux : List (Name × DataValue) → KVMap → Bool
def subset : KVMap KVMap Bool
| m₁, m₂ => subsetAux m₁ m₂
def mergeBy (mergeFn : Name DataValue DataValue DataValue) (l r : KVMap)
: KVMap := Id.run do
let mut result := l
for k, vᵣ in r do
if let some vₗ := result.find k then
result := result.insert k (mergeFn k vₗ vᵣ)
else
result := result.insert k vᵣ
return result
def eqv (m₁ m₂ : KVMap) : Bool :=
subset m₁ m₂ && subset m₂ m₁
@@ -219,11 +181,6 @@ class Value (α : Type) where
@[inline] def set {α : Type} [Value α] (m : KVMap) (k : Name) (v : α) : KVMap :=
m.insert k (Value.toDataValue v)
@[inline] def update {α : Type} [Value α] (m : KVMap) (k : Name) (f : Option α Option α) : KVMap :=
match f (m.get? k) with
| some a => m.set k a
| none => m.erase k
instance : Value DataValue where
toDataValue := id
ofDataValue? := some

View File

@@ -206,7 +206,7 @@ instance : FromJson DocumentChange where
[reference](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#workspaceEdit) -/
structure WorkspaceEdit where
/-- Changes to existing resources. -/
changes? : Option (RBMap DocumentUri TextEditBatch compare) := none
changes : RBMap DocumentUri TextEditBatch compare :=
/-- Depending on the client capability
`workspace.workspaceEdit.resourceOperations` document changes are either
an array of `TextDocumentEdit`s to express changes to n different text
@@ -220,14 +220,14 @@ structure WorkspaceEdit where
If a client neither supports `documentChanges` nor
`workspace.workspaceEdit.resourceOperations` then only plain `TextEdit`s
using the `changes` property are supported. -/
documentChanges? : Option (Array DocumentChange) := none
documentChanges : Array DocumentChange :=
/-- A map of change annotations that can be referenced in
`AnnotatedTextEdit`s or create, rename and delete file / folder
operations.
Whether clients honor this property depends on the client capability
`workspace.changeAnnotationSupport`. -/
changeAnnotations? : Option (RBMap String ChangeAnnotation compare) := none
changeAnnotations : RBMap String ChangeAnnotation compare :=
deriving ToJson, FromJson
namespace WorkspaceEdit
@@ -236,24 +236,24 @@ instance : EmptyCollection WorkspaceEdit := ⟨{}⟩
instance : Append WorkspaceEdit where
append x y := {
changes? :=
match x.changes?, y.changes? with
| v, none | none, v => v
| some x, some y => x.mergeBy (fun _ v₁ v₂ => v₁ ++ v₂) y
documentChanges? :=
match x.documentChanges?, y.documentChanges? with
| v, none | none, v => v
| some x, some y => x ++ y
changeAnnotations? :=
match x.changeAnnotations?, y.changeAnnotations? with
| v, none | none, v => v
| some x, some y => x.mergeBy (fun _ _v₁ v₂ => v₂) y
changes := x.changes.mergeBy (fun _ v₁ v₂ => v₁ ++ v₂) y.changes
documentChanges := x.documentChanges ++ y.documentChanges
changeAnnotations := x.changeAnnotations.mergeBy (fun _ _v₁ v => v) y.changeAnnotations
}
def ofTextDocumentEdit (e : TextDocumentEdit) : WorkspaceEdit :=
{ documentChanges? := #[DocumentChange.edit e]}
{ documentChanges := #[DocumentChange.edit e]}
def ofTextEdit (doc : VersionedTextDocumentIdentifier) (te : TextEdit) : WorkspaceEdit :=
def ofTextEdit (uri : DocumentUri) (te : TextEdit) : WorkspaceEdit :=
/- [note], there is a bug in vscode where not including the version will cause an error,
even though the version field is not used to validate the change.
References:
- [a fix in the wild](https://github.com/stylelint/vscode-stylelint/pull/330/files).
Note that the version field needs to be present, even if the value is `undefined`.
- [angry comment](https://github.com/tsqllint/tsqllint-vscode-extension/blob/727026fce9f8c6a33d113373666d0776f8f6c23c/server/src/server.ts#L70)
-/
let doc := {uri, version? := some 0}
ofTextDocumentEdit { textDocument := doc, edits := #[te]}
end WorkspaceEdit

View File

@@ -74,7 +74,6 @@ structure ServerCapabilities where
declarationProvider : Bool := false
typeDefinitionProvider : Bool := false
referencesProvider : Bool := false
renameProvider? : Option RenameOptions := none
workspaceSymbolProvider : Bool := false
foldingRangeProvider : Bool := false
semanticTokensProvider? : Option SemanticTokensOptions := none

View File

@@ -62,23 +62,8 @@ def readMessage : IpcM JsonRpc.Message := do
def readRequestAs (expectedMethod : String) (α) [FromJson α] : IpcM (Request α) := do
(stdout).readLspRequestAs expectedMethod α
/--
Reads response, discarding notifications in between. This function is meant
purely for testing where we use `collectDiagnostics` explicitly if we do care
about such notifications. -/
partial def readResponseAs (expectedID : RequestID) (α) [FromJson α] :
IpcM (Response α) := do
let m (stdout).readLspMessage
match m with
| Message.response id result =>
if id == expectedID then
match fromJson? result with
| Except.ok v => pure expectedID, v
| Except.error inner => throw $ userError s!"Unexpected result '{result.compress}'\n{inner}"
else
throw $ userError s!"Expected id {expectedID}, got id {id}"
| .notification .. => readResponseAs expectedID α
| _ => throw $ userError s!"Expected JSON-RPC response, got: '{(toJson m).compress}'"
def readResponseAs (expectedID : RequestID) (α) [FromJson α] : IpcM (Response α) := do
(stdout).readLspResponseAs expectedID α
def waitForExit : IpcM UInt32 := do
(read).wait

View File

@@ -351,16 +351,5 @@ structure FoldingRange where
kind? : Option FoldingRangeKind := none
deriving ToJson
structure RenameOptions where
prepareProvider : Bool := false
deriving FromJson, ToJson
structure RenameParams extends TextDocumentPositionParams where
newName : String
deriving FromJson, ToJson
structure PrepareRenameParams extends TextDocumentPositionParams
deriving FromJson, ToJson
end Lsp
end Lean

View File

@@ -69,10 +69,4 @@ def NameTrie.forMatchingM [Monad m] (t : NameTrie β) (k : Name) (f : β → m U
def NameTrie.forM [Monad m] (t : NameTrie β) (f : β m Unit) : m Unit :=
t.forMatchingM Name.anonymous f
def NameTrie.matchingToArray (t : NameTrie β) (k : Name) : Array β :=
Id.run <| t.foldMatchingM k #[] fun v acc => acc.push v
def NameTrie.toArray (t : NameTrie β) : Array β :=
Id.run <| t.foldM #[] fun v acc => acc.push v
end Lean

View File

@@ -1261,7 +1261,7 @@ def elabExplicitUnivs (lvls : Array Syntax) : TermElabM (List Level) := do
- When we elaborate choice nodes (and overloaded identifiers), we track multiple results using the `observing x` combinator.
The `observing x` executes `x` and returns a `TermElabResult`.
`observing x` does not check for synthetic sorry's, just an exception. Thus, it may think `x` worked when it didn't
`observing `x does not check for synthetic sorry's, just an exception. Thus, it may think `x` worked when it didn't
if a synthetic sorry was introduced. We decided that checking for synthetic sorrys at `observing` is not a good solution
because it would not be clear to decide what the "main" error message for the alternative is. When the result contains
a synthetic `sorry`, it is not clear which error message corresponds to the `sorry`. Moreover, while executing `x`, many

View File

@@ -118,64 +118,6 @@ private partial def elabChoiceAux (cmds : Array Syntax) (i : Nat) : CommandElabM
@[builtin_command_elab choice] def elabChoice : CommandElab := fun stx =>
elabChoiceAux stx.getArgs 0
/-- Declares one or more universe variables.
`universe u v`
`Prop`, `Type`, `Type u` and `Sort u` are types that classify other types, also known as
*universes*. In `Type u` and `Sort u`, the variable `u` stands for the universe's *level*, and a
universe at level `u` can only classify universes that are at levels lower than `u`. For more
details on type universes, please refer to [the relevant chapter of Theorem Proving in Lean][tpil
universes].
Just as type arguments allow polymorphic definitions to be used at many different types, universe
parameters, represented by universe variables, allow a definition to be used at any required level.
While Lean mostly handles universe levels automatically, declaring them explicitly can provide more
control when writing signatures. The `universe` keyword allows the declared universe variables to be
used in a collection of definitions, and Lean will ensure that these definitions use them
consistently.
[tpil universes]: https://lean-lang.org/theorem_proving_in_lean4/dependent_type_theory.html#types-as-objects
(Type universes on Theorem Proving in Lean)
```lean
/- Explicit type-universe parameter. -/
def id₁.{u} (α : Type u) (a : α) := a
/- Implicit type-universe parameter, equivalent to `id₁`.
Requires option `autoImplicit true`, which is the default. -/
def id₂ (α : Type u) (a : α) := a
/- Explicit standalone universe variable declaration, equivalent to `id₁` and `id₂`. -/
universe u
def id₃ (α : Type u) (a : α) := a
```
On a more technical note, using a universe variable only in the right-hand side of a definition
causes an error if the universe has not been declared previously.
```lean
def L₁.{u} := List (Type u)
-- def L₂ := List (Type u) -- error: `unknown universe level 'u'`
universe u
def L₃ := List (Type u)
```
## Examples
```lean
universe u v w
structure Pair (α : Type u) (β : Type v) : Type (max u v) where
a : α
b : β
#check Pair.{v, w}
-- Pair : Type v → Type w → Type (max v w)
```
-/
@[builtin_command_elab «universe»] def elabUniverse : CommandElab := fun n => do
n[1].forArgsM addUnivLevel
@@ -184,30 +126,6 @@ structure Pair (α : Type u) (β : Type v) : Type (max u v) where
| Except.ok env => setEnv env
| Except.error ex => throwError (ex.toMessageData ( getOptions))
/-- Adds names from other namespaces to the current namespace.
The command `export Some.Namespace (name₁ name₂)` makes `name₁` and `name₂`:
- visible in the current namespace without prefix `Some.Namespace`, like `open`, and
- visible from outside the current namespace `N` as `N.name₁` and `N.name₂`.
## Examples
```lean
namespace Morning.Sky
def star := "venus"
end Morning.Sky
namespace Evening.Sky
export Morning.Sky (star)
-- `star` is now in scope
#check star
end Evening.Sky
-- `star` is visible in `Evening.Sky`
#check Evening.Sky.star
```
-/
@[builtin_command_elab «export»] def elabExport : CommandElab := fun stx => do
let `(export $ns ($ids*)) := stx | throwUnsupportedSyntax
let nss resolveNamespace ns
@@ -217,123 +135,9 @@ end Evening.Sky
for idStx in ids do
let id := idStx.getId
let declName resolveNameUsingNamespaces nss idStx
if ( getInfoState).enabled then
addConstInfo idStx declName
aliases := aliases.push (currNamespace ++ id, declName)
modify fun s => { s with env := aliases.foldl (init := s.env) fun env p => addAlias env p.1 p.2 }
/-- Makes names from other namespaces visible without writing the namespace prefix.
Names that are made available with `open` are visible within the current `section` or `namespace`
block. This makes referring to (type) definitions and theorems easier, but note that it can also
make [scoped instances], notations, and attributes from a different namespace available.
The `open` command can be used in a few different ways:
* `open Some.Namespace.Path1 Some.Namespace.Path2` makes all non-protected names in
`Some.Namespace.Path1` and `Some.Namespace.Path2` available without the prefix, so that
`Some.Namespace.Path1.x` and `Some.Namespace.Path2.y` can be referred to by writing only `x` and
`y`.
* `open Some.Namespace.Path hiding def1 def2` opens all non-protected names in `Some.Namespace.Path`
except `def1` and `def2`.
* `open Some.Namespace.Path (def1 def2)` only makes `Some.Namespace.Path.def1` and
`Some.Namespace.Path.def2` available without the full prefix, so `Some.Namespace.Path.def3` would
be unaffected.
This works even if `def1` and `def2` are `protected`.
* `open Some.Namespace.Path renaming def1 → def1', def2 → def2'` same as `open Some.Namespace.Path
(def1 def2)` but `def1`/`def2`'s names are changed to `def1'`/`def2'`.
This works even if `def1` and `def2` are `protected`.
* `open scoped Some.Namespace.Path1 Some.Namespace.Path2` **only** opens [scoped instances],
notations, and attributes from `Namespace1` and `Namespace2`; it does **not** make any other name
available.
* `open <any of the open shapes above> in` makes the names `open`-ed visible only in the next
command or expression.
[scoped instance]: https://lean-lang.org/theorem_proving_in_lean4/type_classes.html#scoped-instances
(Scoped instances in Theorem Proving in Lean)
## Examples
```lean
/-- SKI combinators https://en.wikipedia.org/wiki/SKI_combinator_calculus -/
namespace Combinator.Calculus
def I (a : α) : α := a
def K (a : α) : β α := fun _ => a
def S (x : α β γ) (y : α β) (z : α) : γ := x z (y z)
end Combinator.Calculus
section
-- open everything under `Combinator.Calculus`, *i.e.* `I`, `K` and `S`,
-- until the section ends
open Combinator.Calculus
theorem SKx_eq_K : S K x = I := rfl
end
-- open everything under `Combinator.Calculus` only for the next command (the next `theorem`, here)
open Combinator.Calculus in
theorem SKx_eq_K' : S K x = I := rfl
section
-- open only `S` and `K` under `Combinator.Calculus`
open Combinator.Calculus (S K)
theorem SKxy_eq_y : S K x y = y := rfl
-- `I` is not in scope, we have to use its full path
theorem SKxy_eq_Iy : S K x y = Combinator.Calculus.I y := rfl
end
section
open Combinator.Calculus
renaming
I identity,
K konstant
#check identity
#check konstant
end
section
open Combinator.Calculus
hiding S
#check I
#check K
end
section
namespace Demo
inductive MyType
| val
namespace N1
scoped infix:68 "" => BEq.beq
scoped instance : BEq MyType where
beq _ _ := true
def Alias := MyType
end N1
end Demo
-- bring `≋` and the instance in scope, but not `Alias`
open scoped Demo.N1
#check Demo.MyType.val == Demo.MyType.val
#check Demo.MyType.val Demo.MyType.val
-- #check Alias -- unknown identifier 'Alias'
end
```
-/
@[builtin_command_elab «open»] def elabOpen : CommandElab
| `(open $decl:openDecl) => do
let openDecls elabOpenDecl decl
@@ -419,102 +223,6 @@ private def replaceBinderAnnotation (binder : TSyntax ``Parser.Term.bracketedBin
else
return #[binder]
/-- Declares one or more typed variables, or modifies whether already-declared variables are
implicit.
Introduces variables that can be used in definitions within the same `namespace` or `section` block.
When a definition mentions a variable, Lean will add it as an argument of the definition. The
`variable` command is also able to add typeclass parameters. This is useful in particular when
writing many definitions that have parameters in common (see below for an example).
Variable declarations have the same flexibility as regular function paramaters. In particular they
can be [explicit, implicit][binder docs], or [instance implicit][tpil classes] (in which case they
can be anonymous). This can be changed, for instance one can turn explicit variable `x` into an
implicit one with `variable {x}`. Note that currently, you should avoid changing how variables are
bound and declare new variables at the same time; see [issue 2789] for more on this topic.
See [*Variables and Sections* from Theorem Proving in Lean][tpil vars] for a more detailed
discussion.
[tpil vars]: https://lean-lang.org/theorem_proving_in_lean4/dependent_type_theory.html#variables-and-sections
(Variables and Sections on Theorem Proving in Lean)
[tpil classes]: https://lean-lang.org/theorem_proving_in_lean4/type_classes.html
(Type classes on Theorem Proving in Lean)
[binder docs]: https://leanprover-community.github.io/mathlib4_docs/Lean/Expr.html#Lean.BinderInfo
(Documentation for the BinderInfo type)
[issue 2789]: https://github.com/leanprover/lean4/issues/2789
(Issue 2789 on github)
## Examples
```lean
section
variable
{α : Type u} -- implicit
(a : α) -- explicit
[instBEq : BEq α] -- instance implicit, named
[Hashable α] -- instance implicit, anonymous
def isEqual (b : α) : Bool :=
a == b
#check isEqual
-- isEqual.{u} {α : Type u} (a : α) [instBEq : BEq α] (b : α) : Bool
variable
{a} -- `a` is implicit now
def eqComm {b : α} := a == b ↔ b == a
#check eqComm
-- eqComm.{u} {α : Type u} {a : α} [instBEq : BEq α] {b : α} : Prop
end
```
The following shows a typical use of `variable` to factor out definition arguments:
```lean
variable (Src : Type)
structure Logger where
trace : List (Src × String)
#check Logger
-- Logger (Src : Type) : Type
namespace Logger
-- switch `Src : Type` to be implicit until the `end Logger`
variable {Src}
def empty : Logger Src where
trace := []
#check empty
-- Logger.empty {Src : Type} : Logger Src
variable (log : Logger Src)
def len :=
log.trace.length
#check len
-- Logger.len {Src : Type} (log : Logger Src) : Nat
variable (src : Src) [BEq Src]
-- at this point all of `log`, `src`, `Src` and the `BEq` instance can all become arguments
def filterSrc :=
log.trace.filterMap
fun (src', str') => if src' == src then some str' else none
#check filterSrc
-- Logger.filterSrc {Src : Type} (log : Logger Src) (src : Src) [inst✝ : BEq Src] : List String
def lenSrc :=
log.filterSrc src |>.length
#check lenSrc
-- Logger.lenSrc {Src : Type} (log : Logger Src) (src : Src) [inst✝ : BEq Src] : Nat
end Logger
```
-/
@[builtin_command_elab «variable»] def elabVariable : CommandElab
| `(variable $binders*) => do
-- Try to elaborate `binders` for sanity checking

View File

@@ -252,21 +252,11 @@ def elabCDotFunctionAlias? (stx : Term) : TermElabM (Option Expr) := do
try Term.resolveId? f catch _ => return none
else
return none
| `(fun $binders* => binop% $f $a $b)
| `(fun $binders* => binop_lazy% $f $a $b)
| `(fun $binders* => leftact% $f $a $b)
| `(fun $binders* => rightact% $f $a $b)
| `(fun $binders* => binrel% $f $a $b)
| `(fun $binders* => binrel_no_prop% $f $a $b) =>
| `(fun $binders* => binop% $f $a $b) =>
if binders == #[a, b] then
try Term.resolveId? f catch _ => return none
else
return none
| `(fun $binders* => unop% $f $a) =>
if binders == #[a] then
try Term.resolveId? f catch _ => return none
else
return none
| _ => return none
where
expandCDotArg? (stx : Term) : MacroM (Option Term) :=

View File

@@ -24,15 +24,15 @@ where
| [] => ``(isTrue rfl)
| (a, b, recField, isProof) :: todo => withFreshMacroScope do
let rhs if isProof then
`(have h : @$a = @$b := rfl; by subst h; exact $( mkSameCtorRhs todo):term)
`(have h : $a = $b := rfl; by subst h; exact $( mkSameCtorRhs todo):term)
else
`(if h : @$a = @$b then
`(if h : $a = $b then
by subst h; exact $( mkSameCtorRhs todo):term
else
isFalse (by intro n; injection n; apply h _; assumption))
if let some auxFunName := recField then
-- add local instance for `a = b` using the function being defined `auxFunName`
`(let inst := $(mkIdent auxFunName) @$a @$b; $rhs)
`(let inst := $(mkIdent auxFunName) $a $b; $rhs)
else
return rhs
@@ -113,11 +113,7 @@ def mkDecEq (declName : Name) : CommandElabM Bool := do
return false -- nested inductive types are not supported yet
else
let cmds liftTermElabM <| mkDecEqCmds indVal
-- `cmds` can have a number of syntax nodes quadratic in the number of constructors
-- and thus create as many info tree nodes, which we never make use of but which can
-- significantly slow down e.g. the unused variables linter; avoid creating them
withEnableInfoTree false do
cmds.forM elabCommand
cmds.forM elabCommand
return true
partial def mkEnumOfNat (declName : Name) : MetaM Unit := do

View File

@@ -1,7 +1,7 @@
/-
Copyright (c) 2021 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Kyle Miller, Sebastian Ullrich
Authors: Leonardo de Moura
-/
import Lean.Elab.App
import Lean.Elab.BuiltinNotation
@@ -79,30 +79,13 @@ private def throwForInFailure (forInInstance : Expr) : TermElabM Expr :=
namespace Op
/-!
The elaborator for expression trees of `binop%`, `binop_lazy%`, `leftact%`, `rightact%`, and `unop%` terms.
The elaborator for `binop%`, `binop_lazy%`, and `unop%` terms.
At a high level, the elaborator tries to solve for a type that each of the operands in the expression tree
can be coerced to, while taking into account the expected type for the entire expression tree.
Once this type is computed (and if it exists), it inserts coercions where needed.
Here are brief descriptions of each of the operator types:
- `binop% f a b` elaborates `f a b` as a binary operator with two operands `a` and `b`,
and each operand participates in the protocol.
- `binop_lazy% f a b` is like `binop%` but elaborates as `f a (fun () => b)`.
- `unop% f a` elaborates `f a` as a unary operator with one operand `a`, which participates in the protocol.
- `leftact% f a b` elaborates `f a b` as a left action (the `a` operand "acts upon" the `b` operand).
Only `b` participates in the protocol since `a` can have an unrelated type, for example scalar multiplication of vectors.
- `rightact% f a b` elaborates `f a b` as a right action (the `b` operand "acts upon" the `a` operand).
Only `a` participates in the protocol since `b` can have an unrelated type.
This is used by `HPow` since, for example, there are both `Real -> Nat -> Real` and `Real -> Real -> Real`
exponentiation functions, and we prefer the former in the case of `x ^ 2`, but `binop%` would choose the latter. (#2220)
- There are also `binrel%` and `binrel_no_prop%` (see the docstring for `elabBinRelCore`).
The elaborator works as follows:
It works as follows:
1- Expand macros.
2- Convert `Syntax` object corresponding to the `binop%/...` term into a `Tree`.
The `toTree` method visits nested `binop%/...` terms and parentheses.
2- Convert `Syntax` object corresponding to the `binop%` (`binop_lazy%` and `unop%`) term into a `Tree`.
The `toTree` method visits nested `binop%` (`binop_lazy%` and `unop%`) terms and parentheses.
3- Synthesize pending metavariables without applying default instances and using the
`(mayPostpone := true)`.
4- Tries to compute a maximal type for the tree computed at step 2.
@@ -134,29 +117,18 @@ coercions inside of a `HAdd` instance.
Remarks:
* In the new `binop%` and related elaborators the decision whether a coercion will be inserted or not
is made at `binop%` elaboration time. This was not the case in the old elaborator.
For example, an instance, such as `HAdd Int ?m ?n`, could be created when executing
the `binop%` elaborator, and only resolved much later. We try to minimize this problem
by synthesizing pending metavariables at step 3.
In the new `binop%` and related elaborators the decision whether a coercion will be inserted or not
is made at `binop%` elaboration time. This was not the case in the old elaborator.
For example, an instance, such as `HAdd Int ?m ?n`, could be created when executing
the `binop%` elaborator, and only resolved much later. We try to minimize this problem
by synthesizing pending metavariables at step 3.
* For types containing heterogeneous operators (e.g., matrix multiplication), step 4 will fail
and we will skip coercion insertion. For example, `x : Matrix Real 5 4` and `y : Matrix Real 4 8`,
there is no coercion `Matrix Real 5 4` from `Matrix Real 4 8` and vice-versa, but
`x * y` is elaborated successfully and has type `Matrix Real 5 8`.
* The `leftact%` and `rightact%` elaborators are to handle binary operations where only one of
the arguments participates in the protocol. For example, in `2 ^ n + y` with `n : Nat` and `y : Real`,
we do not want to coerce `n` to be a real as well, but we do want to elaborate `2 : Real`.
For types containing heterogeneous operators (e.g., matrix multiplication), step 4 will fail
and we will skip coercion insertion. For example, `x : Matrix Real 5 4` and `y : Matrix Real 4 8`,
there is no coercion `Matrix Real 5 4` from `Matrix Real 4 8` and vice-versa, but
`x * y` is elaborated successfully and has type `Matrix Real 5 8`.
-/
private inductive BinOpKind where
| regular -- `binop%`
| lazy -- `binop_lazy%`
| leftact -- `leftact%`
| rightact -- `rightact%`
deriving BEq
private inductive Tree where
/--
Leaf of the tree.
@@ -165,9 +137,9 @@ private inductive Tree where
-/
| term (ref : Syntax) (infoTrees : PersistentArray InfoTree) (val : Expr)
/--
`ref` is the original syntax that expanded into `binop%/...`.
`ref` is the original syntax that expanded into `binop%`.
-/
| binop (ref : Syntax) (kind : BinOpKind) (f : Expr) (lhs rhs : Tree)
| binop (ref : Syntax) (lazy : Bool) (f : Expr) (lhs rhs : Tree)
/--
`ref` is the original syntax that expanded into `unop%`.
-/
@@ -192,11 +164,9 @@ private partial def toTree (s : Syntax) : TermElabM Tree := do
where
go (s : Syntax) := do
match s with
| `(binop% $f $lhs $rhs) => processBinOp s .regular f lhs rhs
| `(binop_lazy% $f $lhs $rhs) => processBinOp s .lazy f lhs rhs
| `(binop% $f $lhs $rhs) => processBinOp (lazy := false) s f lhs rhs
| `(binop_lazy% $f $lhs $rhs) => processBinOp (lazy := true) s f lhs rhs
| `(unop% $f $arg) => processUnOp s f arg
| `(leftact% $f $lhs $rhs) => processBinOp s .leftact f lhs rhs
| `(rightact% $f $lhs $rhs) => processBinOp s .rightact f lhs rhs
| `(($e)) =>
if hasCDot e then
processLeaf s
@@ -211,12 +181,9 @@ where
return .macroExpansion macroName s s' ( go s')
| none => processLeaf s
processBinOp (ref : Syntax) (kind : BinOpKind) (f lhs rhs : Syntax) := do
processBinOp (ref : Syntax) (f lhs rhs : Syntax) (lazy : Bool) := do
let some f resolveId? f | throwUnknownConstant f.getId
-- treat corresponding argument as leaf for `leftact/rightact`
let lhs if kind == .leftact then processLeaf lhs else go lhs
let rhs if kind == .rightact then processLeaf rhs else go rhs
return .binop ref kind f lhs rhs
return .binop (lazy := lazy) ref f ( go lhs) ( go rhs)
processUnOp (ref : Syntax) (f arg : Syntax) := do
let some f resolveId? f | throwUnknownConstant f.getId
@@ -262,8 +229,6 @@ where
unless ( get).hasUncomparable do
match t with
| .macroExpansion _ _ _ nested => go nested
| .binop _ .leftact _ _ rhs => go rhs
| .binop _ .rightact _ lhs _ => go lhs
| .binop _ _ _ lhs rhs => go lhs; go rhs
| .unop _ _ arg => go arg
| .term _ _ val =>
@@ -294,9 +259,9 @@ private def toExprCore (t : Tree) : TermElabM Expr := do
match t with
| .term _ trees e =>
modifyInfoState (fun s => { s with trees := s.trees ++ trees }); return e
| .binop ref kind f lhs rhs =>
| .binop ref lazy f lhs rhs =>
withRef ref <| withInfoContext' ref (mkInfo := mkTermInfo .anonymous ref) do
mkBinOp (kind == .lazy) f ( toExprCore lhs) ( toExprCore rhs)
mkBinOp lazy f ( toExprCore lhs) ( toExprCore rhs)
| .unop ref f arg =>
withRef ref <| withInfoContext' ref (mkInfo := mkTermInfo .anonymous ref) do
mkUnOp f ( toExprCore arg)
@@ -382,11 +347,7 @@ mutual
where
go (t : Tree) (f? : Option Expr) (lhs : Bool) (isPred : Bool) : TermElabM Tree := do
match t with
| .binop ref .leftact f lhs rhs =>
return .binop ref .leftact f lhs ( go rhs none false false)
| .binop ref .rightact f lhs rhs =>
return .binop ref .rightact f ( go lhs none false false) rhs
| .binop ref kind f lhs rhs =>
| .binop ref lazy f lhs rhs =>
/-
We only keep applying coercions to `maxType` if `f` is predicate or
`f` has a homogenous instance with `maxType`. See `hasHomogeneousInstance` for additional details.
@@ -394,10 +355,10 @@ mutual
Remark: We assume `binrel%` elaborator is only used with homogenous predicates.
-/
if ( pure isPred <||> hasHomogeneousInstance f maxType) then
return .binop ref kind f ( go lhs f true false) ( go rhs f false false)
return .binop ref lazy f ( go lhs f true false) ( go rhs f false false)
else
let r withRef ref do
mkBinOp (kind == .lazy) f ( toExpr lhs none) ( toExpr rhs none)
mkBinOp lazy f ( toExpr lhs none) ( toExpr rhs none)
let infoTrees getResetInfoTrees
return .term ref infoTrees r
| .unop ref f arg =>
@@ -435,24 +396,22 @@ end
def elabOp : TermElab := fun stx expectedType? => do
toExpr ( toTree stx) expectedType?
@[builtin_term_elab binop] def elabBinOp : TermElab := elabOp
@[builtin_term_elab binop_lazy] def elabBinOpLazy : TermElab := elabOp
@[builtin_term_elab leftact] def elabLeftact : TermElab := elabOp
@[builtin_term_elab rightact] def elabRightact : TermElab := elabOp
@[builtin_term_elab unop] def elabUnOp : TermElab := elabOp
@[builtin_term_elab binop]
def elabBinOp : TermElab := elabOp
@[builtin_term_elab binop_lazy]
def elabBinOpLazy : TermElab := elabOp
@[builtin_term_elab unop]
def elabUnOp : TermElab := elabOp
/--
Elaboration functions for `binrel%` and `binrel_no_prop%` notations.
Elaboration functions for `binrel%` and `binrel_no_prop%` notations.
We use the infrastructure for `binop%` to make sure we propagate information between the left and right hand sides
of a binary relation.
- `binrel% R x y` elaborates `R x y` using the `binop%/...` expression trees in both `x` and `y`.
It is similar to how `binop% R x y` elaborates but with a significant difference:
it does not use the expected type when computing the types of the operads.
- `binrel_no_prop% R x y` elaborates `R x y` like `binrel% R x y`, but if the resulting type for `x` and `y`
is `Prop` they are coerced to `Bool`.
This is used for relations such as `==` which do not support `Prop`, but we still want
to be able to write `(5 > 2) == (2 > 1)` for example.
Recall that the `binrel_no_prop%` notation is used for relations such as `==` which do not support `Prop`, but
we still want to be able to write `(5 > 2) == (2 > 1)`.
-/
def elabBinRelCore (noProp : Bool) (stx : Syntax) (expectedType? : Option Expr) : TermElabM Expr := do
match ( resolveId? stx[1]) with
@@ -489,7 +448,7 @@ def elabBinRelCore (noProp : Bool) (stx : Syntax) (expectedType? : Option Expr)
-/
let lhs withRef stx[2] <| toTree stx[2]
let rhs withRef stx[3] <| toTree stx[3]
let tree := .binop stx .regular f lhs rhs
let tree := .binop (lazy := false) stx f lhs rhs
let r analyze tree none
trace[Elab.binrel] "hasUncomparable: {r.hasUncomparable}, maxType: {r.max?}"
if r.hasUncomparable || r.max?.isNone then

View File

@@ -102,8 +102,7 @@ def runFrontend
: IO (Environment × Bool) := do
let inputCtx := Parser.mkInputContext input fileName
let (header, parserState, messages) Parser.parseHeader inputCtx
-- allow `env` to be leaked, which would live until the end of the process anyway
let (env, messages) processHeader (leakEnv := true) header opts messages inputCtx trustLevel
let (env, messages) processHeader header opts messages inputCtx trustLevel
let env := env.setMainModule mainModuleName
let mut commandState := Command.mkState env messages opts

View File

@@ -16,11 +16,10 @@ def headerToImports (header : Syntax) : Array Import :=
let id := stx[2].getId
{ module := id, runtimeOnly := runtime }
def processHeader (header : Syntax) (opts : Options) (messages : MessageLog)
(inputCtx : Parser.InputContext) (trustLevel : UInt32 := 0) (leakEnv := false)
def processHeader (header : Syntax) (opts : Options) (messages : MessageLog) (inputCtx : Parser.InputContext) (trustLevel : UInt32 := 0)
: IO (Environment × MessageLog) := do
try
let env importModules (leakEnv := leakEnv) (headerToImports header) opts trustLevel
let env importModules (headerToImports header) opts trustLevel
pure (env, messages)
catch e =>
let env mkEmptyEnvironment

View File

@@ -57,7 +57,7 @@ private def resolveNameUsingNamespacesCore (nss : List Name) (idStx : Syntax) :
else
withRef idStx do throwError "ambiguous identifier '{idStx.getId}', possible interpretations: {result.map mkConst}"
def elabOpenDecl [MonadResolveName m] [MonadInfoTree m] (stx : TSyntax ``Parser.Command.openDecl) : m (List OpenDecl) := do
def elabOpenDecl [MonadResolveName m] (stx : TSyntax ``Parser.Command.openDecl) : m (List OpenDecl) := do
StateRefT'.run' (s := { openDecls := ( getOpenDecls), currNamespace := ( getCurrNamespace) }) do
match stx with
| `(Parser.Command.openDecl| $nss*) =>
@@ -73,25 +73,18 @@ def elabOpenDecl [MonadResolveName m] [MonadInfoTree m] (stx : TSyntax ``Parser.
let nss resolveNamespace ns
for idStx in ids do
let declName resolveNameUsingNamespacesCore nss idStx
if ( getInfoState).enabled then
addConstInfo idStx declName
addOpenDecl (OpenDecl.explicit idStx.getId declName)
| `(Parser.Command.openDecl| $ns hiding $ids*) =>
let ns resolveUniqueNamespace ns
activateScoped ns
for id in ids do
let declName resolveId ns id
if ( getInfoState).enabled then
addConstInfo id declName
let _ resolveId ns id
let ids := ids.map (·.getId) |>.toList
addOpenDecl (OpenDecl.simple ns ids)
| `(Parser.Command.openDecl| $ns renaming $[$froms -> $tos],*) =>
let ns resolveUniqueNamespace ns
for («from», to) in froms.zip tos do
let declName resolveId ns «from»
if ( getInfoState).enabled then
addConstInfo «from» declName
addConstInfo to declName
addOpenDecl (OpenDecl.explicit to.getId declName)
| _ => throwUnsupportedSyntax
return ( get).openDecls

View File

@@ -62,7 +62,7 @@ def applyAttributesOf (preDefs : Array PreDefinition) (applicationTime : Attribu
for preDef in preDefs do
applyAttributesAt preDef.declName preDef.modifiers.attrs applicationTime
def abstractNestedProofs (preDef : PreDefinition) : MetaM PreDefinition := withRef preDef.ref do
def abstractNestedProofs (preDef : PreDefinition) : MetaM PreDefinition :=
if preDef.kind.isTheorem || preDef.kind.isExample then
pure preDef
else do

View File

@@ -113,12 +113,12 @@ private partial def replaceRecApps (recFnName : Name) (recArgInfo : RecArgInfo)
| Expr.letE n type val body _ =>
withLetDecl n ( loop below type) ( loop below val) fun x => do
mkLetFVars #[x] ( loop below (body.instantiate1 x)) (usedLetOnly := false)
| Expr.mdata d b =>
if let some stx := getRecAppSyntax? e then
withRef stx <| loop below b
| Expr.mdata d b =>
if let some _ := getRecAppSyntax? e then
loop below b
else
return mkMData d ( loop below b)
| Expr.proj n i e => return mkProj n i ( loop below e)
| Expr.proj n i e => return mkProj n i ( loop below e)
| Expr.app _ _ =>
let processApp (e : Expr) : StateRefT (HasConstCache recFnName) M Expr :=
e.withApp fun f args => do

View File

@@ -23,11 +23,7 @@ private partial def replaceIndPredRecApps (recFnName : Name) (recArgInfo : RecAr
| Expr.letE n type val body _ =>
withLetDecl n ( loop type) ( loop val) fun x => do
mkLetFVars #[x] ( loop (body.instantiate1 x))
| Expr.mdata d b => do
if let some stx := getRecAppSyntax? e then
withRef stx <| loop b
else
return mkMData d ( loop b)
| Expr.mdata d e => return mkMData d ( loop e)
| Expr.proj n i e => return mkProj n i ( loop e)
| Expr.app _ _ =>
let processApp (e : Expr) : M Expr := do

View File

@@ -83,22 +83,19 @@ def structuralRecursion (preDefs : Array PreDefinition) : TermElabM Unit :=
else do
let ((recArgPos, preDefNonRec), state) run <| elimRecursion preDefs[0]!
let preDefNonRec eraseRecAppSyntax preDefNonRec
let mut preDef eraseRecAppSyntax preDefs[0]!
let preDef eraseRecAppSyntax preDefs[0]!
state.addMatchers.forM liftM
mapError (addNonRec preDefNonRec (applyAttrAfterCompilation := false)) fun msg =>
m!"structural recursion failed, produced type incorrect term{indentD msg}"
-- We create the `_unsafe_rec` before we abstract nested proofs.
-- Reason: the nested proofs may be referring to the _unsafe_rec.
addAndCompilePartialRec #[preDef]
unless preDef.kind.isTheorem do
unless ( isProp preDef.type) do
preDef abstractNestedProofs preDef
/-
Don't save predefinition info for equation generator
for theorems and definitions that are propositions.
See issue #2327
-/
registerEqnsInfo preDef recArgPos
mapError (addNonRec preDefNonRec (applyAttrAfterCompilation := false)) fun msg =>
m!"structural recursion failed, produced type incorrect term{indentD msg}"
addAndCompilePartialRec #[preDef]
addSmartUnfoldingDef preDef recArgPos
applyAttributesOf #[preDefNonRec] AttributeApplicationTime.afterCompilation

View File

@@ -4,7 +4,6 @@ Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Lean.Meta.Transform
import Lean.Elab.RecAppSyntax
namespace Lean.Elab.Structural
open Meta
@@ -16,39 +15,21 @@ private def shouldBetaReduce (e : Expr) (recFnName : Name) : Bool :=
false
/--
Preprocesses the expressions to improve the effectiveness of `elimRecursion`.
* Beta reduce terms where the recursive function occurs in the lambda term.
Beta reduce terms where the recursive function occurs in the lambda term.
This is useful to improve the effectiveness of `elimRecursion`.
Example:
```
def f : Nat → Nat
| 0 => 1
| i+1 => (fun x => f x) i
```
* Floats out the RecApp markers.
Example:
```
def f : Nat → Nat
| 0 => 1
| i+1 => (f x) i
```
-/
def preprocess (e : Expr) (recFnName : Name) : CoreM Expr :=
Core.transform e
(pre := fun e =>
if shouldBetaReduce e recFnName then
return .visit e.headBeta
else
return .continue)
(post := fun e =>
match e with
| .app (.mdata m f) a =>
if m.isRecApp then
return .done (.mdata m (.app f a))
else
return .done e
| _ => return .done e)
fun e =>
if shouldBetaReduce e recFnName then
return .visit e.headBeta
else
return .continue
end Lean.Elab.Structural

View File

@@ -20,8 +20,7 @@ open Meta
private def applyDefaultDecrTactic (mvarId : MVarId) : TermElabM Unit := do
let remainingGoals Tactic.run mvarId do
Tactic.evalTactic ( `(tactic| decreasing_tactic))
unless remainingGoals.isEmpty do
Term.reportUnsolvedGoals remainingGoals
remainingGoals.forM fun mvarId => Term.reportUnsolvedGoals [mvarId]
private def mkDecreasingProof (decreasingProp : Expr) (decrTactic? : Option Syntax) : TermElabM Expr := do
let mvar mkFreshExprSyntheticOpaqueMVar decreasingProp

View File

@@ -1,598 +0,0 @@
/-
Copyright (c) 2023 Lean FRO, LLC. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joachim Breitner
-/
import Lean.Util.HasConstCache
import Lean.Meta.CasesOn
import Lean.Meta.Match.Match
import Lean.Meta.Tactic.Cleanup
import Lean.Meta.Tactic.Refl
import Lean.Elab.Quotation
import Lean.Elab.RecAppSyntax
import Lean.Elab.PreDefinition.Basic
import Lean.Elab.PreDefinition.Structural.Basic
import Lean.Elab.PreDefinition.WF.TerminationHint
/-!
This module finds lexicographic termination arguments for well-founded recursion.
Starting with basic measures (`sizeOf xᵢ` for all parameters `xᵢ`), it tries all combinations
until it finds one where all proof obligations go through with the given tactic (`decreasing_by`),
if given, or the default `decreasing_tactic`.
For mutual recursion, a single measure is not just one parameter, but one from each recursive
function. Enumerating these can lead to a combinatoric explosion, so we bound
the number of measures tried.
In addition to measures derived from `sizeOf xᵢ`, it also considers measures
that assign an order to the functions themselves. This way we can support mutual
function definitions where no arguments decrease from one function to another.
The result of this module is a `TerminationWF`, which is then passed on to `wfRecursion`; this
design is crucial so that whatever we infer in this module could also be written manually by the
user. It would be bad if there are function definitions that can only be processed with the
guessed lexicographic order.
The following optimizations are applied to make this feasible:
1. The crucial optimization is to look at each argument of each recursive call
_once_, try to prove `<` and (if that fails `≤`), and then look at that table to
pick a suitable measure.
2. The next-crucial optimization is to fill that table lazily. This way, we run the (likely
expensive) tactics as few times as possible, while still being able to consider a possibly
large number of combinations.
3. Before we even try to prove `<`, we check if the arguments are equal (`=`). No well-founded
measure will relate equal terms, likely this check is faster than firing up the tactic engine,
and it adds more signal to the output.
4. Instead of traversing the whole function body over and over, we traverse it once and store
the arguments (in unpacked form) and the local `MetaM` state at each recursive call
(see `collectRecCalls`), which we then re-use for the possibly many proof attempts.
The logic here is based on “Finding Lexicographic Orders for Termination Proofs in Isabelle/HOL”
by Lukas Bulwahn, Alexander Krauss, and Tobias Nipkow, 10.1007/978-3-540-74591-4_5
<https://www21.in.tum.de/~nipkow/pubs/tphols07.pdf>.
-/
set_option autoImplicit false
open Lean Meta Elab
namespace Lean.Elab.WF.GuessLex
/--
Given a predefinition, find good variable names for its parameters.
Use user-given parameter names if present; use x1...xn otherwise.
The length of the returned array is also used to determine the arity
of the function, so it should match what `packDomain` does.
-/
def naryVarNames (fixedPrefixSize : Nat) (preDef : PreDefinition) : MetaM (Array Name):= do
lambdaTelescope preDef.value fun xs _ => do
let xs := xs.extract fixedPrefixSize xs.size
let mut ns := #[]
for h : i in [:xs.size] do
let n (xs[i]'h.2).fvarId!.getUserName
if n.hasMacroScopes then
ns := ns.push ( mkFreshUserName (.mkSimple s!"x{i+1}"))
else
ns := ns.push n
return ns
/-- Internal monad used by `withRecApps` -/
abbrev M (recFnName : Name) (α β : Type) : Type :=
StateRefT (Array α) (StateRefT (HasConstCache recFnName) MetaM) β
/--
Traverses the given expression `e`, and invokes the continuation `k`
at every saturated call to `recFnName`.
The expression `param` is passed along, and refined when going under a matcher
or `casesOn` application.
-/
partial def withRecApps {α} (recFnName : Name) (fixedPrefixSize : Nat) (param : Expr) (e : Expr)
(k : Expr Array Expr MetaM α) : MetaM (Array α) := do
trace[Elab.definition.wf] "withRecApps: {indentExpr e}"
let (_, as) loop param e |>.run #[] |>.run' {}
return as
where
processRec (param : Expr) (e : Expr) : M recFnName α Unit := do
if e.getAppNumArgs < fixedPrefixSize + 1 then
loop param ( etaExpand e)
else
let a k param e.getAppArgs
modifyThe (Array α) (·.push a)
processApp (param : Expr) (e : Expr) : M recFnName α Unit := do
e.withApp fun f args => do
args.forM (loop param)
if f.isConstOf recFnName then
processRec param e
else
loop param f
containsRecFn (e : Expr) : M recFnName α Bool := do
modifyGetThe (HasConstCache recFnName) (·.contains e)
loop (param : Expr) (e : Expr) : M recFnName α Unit := do
if !( containsRecFn e) then
return
match e with
| Expr.lam n d b c =>
loop param d
withLocalDecl n c d fun x => do
loop param (b.instantiate1 x)
| Expr.forallE n d b c =>
loop param d
withLocalDecl n c d fun x => do
loop param (b.instantiate1 x)
| Expr.letE n type val body _ =>
loop param type
loop param val
withLetDecl n type val fun x => do
loop param (body.instantiate1 x)
| Expr.mdata _d b =>
if let some stx := getRecAppSyntax? e then
withRef stx <| loop param b
else
loop param b
| Expr.proj _n _i e => loop param e
| Expr.const .. => if e.isConstOf recFnName then processRec param e
| Expr.app .. =>
match ( matchMatcherApp? e) with
| some matcherApp =>
if !Structural.recArgHasLooseBVarsAt recFnName fixedPrefixSize e then
processApp param e
else
if let some altParams matcherApp.refineThrough? param then
(Array.zip matcherApp.alts (Array.zip matcherApp.altNumParams altParams)).forM
fun (alt, altNumParam, altParam) =>
lambdaTelescope altParam fun xs altParam => do
-- TODO: Use boundedLambdaTelescope
unless altNumParam = xs.size do
throwError "unexpected `casesOn` application alternative{indentExpr alt}\nat application{indentExpr e}"
let altBody := alt.beta xs
loop altParam altBody
else
processApp param e
| none =>
match ( toCasesOnApp? e) with
| some casesOnApp =>
if !Structural.recArgHasLooseBVarsAt recFnName fixedPrefixSize e then
processApp param e
else
if let some altParams casesOnApp.refineThrough? param then
(Array.zip casesOnApp.alts (Array.zip casesOnApp.altNumParams altParams)).forM
fun (alt, altNumParam, altParam) =>
lambdaTelescope altParam fun xs altParam => do
-- TODO: Use boundedLambdaTelescope
unless altNumParam = xs.size do
throwError "unexpected `casesOn` application alternative{indentExpr alt}\nat application{indentExpr e}"
let altBody := alt.beta xs
loop altParam altBody
else
processApp param e
| none => processApp param e
| e => do
let _ ensureNoRecFn recFnName e
/--
A `SavedLocalContext` captures the state and local context of a `MetaM`, to be continued later.
-/
structure SavedLocalContext where
savedLocalContext : LocalContext
savedLocalInstances : LocalInstances
savedState : Meta.SavedState
/-- Capture the `MetaM` state including local context. -/
def SavedLocalContext.create : MetaM SavedLocalContext := do
let savedLocalContext getLCtx
let savedLocalInstances getLocalInstances
let savedState saveState
return { savedLocalContext, savedLocalInstances, savedState }
/-- Run a `MetaM` action in the saved state. -/
def SavedLocalContext.run {α} (slc : SavedLocalContext) (k : MetaM α) :
MetaM α :=
withoutModifyingState $ do
withLCtx slc.savedLocalContext slc.savedLocalInstances do
slc.savedState.restore
k
/-- A `RecCallWithContext` focuses on a single recursive call in a unary predefinition,
and runs the given action in the context of that call. -/
structure RecCallWithContext where
/-- Function index of caller -/
caller : Nat
/-- Parameters of caller -/
params : Array Expr
/-- Function index of callee -/
callee : Nat
/-- Arguments to callee -/
args : Array Expr
ctxt : SavedLocalContext
/-- Store the current recursive call and its context. -/
def RecCallWithContext.create (caller : Nat) (params : Array Expr) (callee : Nat) (args : Array Expr) :
MetaM RecCallWithContext := do
return { caller, params, callee, args, ctxt := ( SavedLocalContext.create) }
/-- Given the packed argument of a (possibly) mutual and (possibly) nary call,
return the function index that is called and the arguments individually.
We expect precisely the expressions produced by `packMutual`, with manifest
`PSum.inr`, `PSum.inl` and `PSigma.mk` constructors, and thus take them apart
rather than using projections. -/
def unpackArg {m} [Monad m] [MonadError m] (arities : Array Nat) (e : Expr) :
m (Nat × Array Expr) := do
-- count PSum injections to find out which function is doing the call
let mut funidx := 0
let mut e := e
while funidx + 1 < arities.size do
if e.isAppOfArity ``PSum.inr 3 then
e := e.getArg! 2
funidx := funidx + 1
else if e.isAppOfArity ``PSum.inl 3 then
e := e.getArg! 2
break
else
throwError "Unexpected expression while unpacking mutual argument"
-- now unpack PSigmas
let arity := arities[funidx]!
let mut args := #[]
while args.size + 1 < arity do
if e.isAppOfArity ``PSigma.mk 4 then
args := args.push (e.getArg! 2)
e := e.getArg! 3
else
throwError "Unexpected expression while unpacking n-ary argument"
args := args.push e
return (funidx, args)
/-- Traverse a unary PreDefinition, and returns a `WithRecCall` closure for each recursive
call site.
-/
def collectRecCalls (unaryPreDef : PreDefinition) (fixedPrefixSize : Nat) (arities : Array Nat)
: MetaM (Array RecCallWithContext) := withoutModifyingState do
addAsAxiom unaryPreDef
lambdaTelescope unaryPreDef.value fun xs body => do
unless xs.size == fixedPrefixSize + 1 do
-- Maybe cleaner to have lambdaBoundedTelescope?
throwError "Unexpected number of lambdas in unary pre-definition"
-- trace[Elab.definition.wf] "collectRecCalls: {xs} {body}"
let param := xs[fixedPrefixSize]!
withRecApps unaryPreDef.declName fixedPrefixSize param body fun param args => do
unless args.size fixedPrefixSize + 1 do
throwError "Insufficient arguments in recursive call"
let arg := args[fixedPrefixSize]!
let (caller, params) unpackArg arities param
let (callee, args) unpackArg arities arg
RecCallWithContext.create caller params callee args
/-- A `GuessLexRel` described how a recursive call affects a measure; whether it
decreases strictly, non-strictly, is equal, or else. -/
inductive GuessLexRel | lt | eq | le | no_idea
deriving Repr, DecidableEq
instance : ToFormat GuessLexRel where
format | .lt => "<"
| .eq => "="
| .le => ""
| .no_idea => "?"
/-- Given a `GuessLexRel`, produce a binary `Expr` that relates two `Nat` values accordingly. -/
def GuessLexRel.toNatRel : GuessLexRel Expr
| lt => mkAppN (mkConst ``LT.lt [levelZero]) #[mkConst ``Nat, mkConst ``instLTNat]
| eq => mkAppN (mkConst ``Eq [levelOne]) #[mkConst ``Nat]
| le => mkAppN (mkConst ``LE.le [levelZero]) #[mkConst ``Nat, mkConst ``instLENat]
| no_idea => unreachable!
/-- Given an expression `e`, produce `sizeOf e` with a suitable instance. -/
def mkSizeOf (e : Expr) : MetaM Expr := do
let ty inferType e
let lvl getLevel ty
let inst synthInstance (mkAppN (mkConst ``SizeOf [lvl]) #[ty])
let res := mkAppN (mkConst ``sizeOf [lvl]) #[ty, inst, e]
check res
return res
/--
For a given recursive call, and a choice of parameter and argument index,
try to prove equality, < or ≤.

Runs inside the call's saved context (`rcc.ctxt`). Tries `=` first (cheap `rfl`),
then `<`, then `≤`, using `decreasing_tactic` or the user-supplied tactic;
returns `.no_idea` when none of the proofs succeed.
-/
def evalRecCall (decrTactic? : Option Syntax) (rcc : RecCallWithContext) (paramIdx argIdx : Nat) :
    MetaM GuessLexRel := do
  rcc.ctxt.run do
    let param := rcc.params[paramIdx]!
    let arg := rcc.args[argIdx]!
    trace[Elab.definition.wf] "inspectRecCall: {rcc.caller} ({param}) → {rcc.callee} ({arg})"
    -- compare `sizeOf` of argument and parameter, not the raw expressions
    let arg ← mkSizeOf rcc.args[argIdx]!
    let param ← mkSizeOf rcc.params[paramIdx]!
    for rel in [GuessLexRel.eq, .lt, .le] do
      let goalExpr := mkAppN rel.toNatRel #[arg, param]
      trace[Elab.definition.wf] "Goal for {rel}: {goalExpr}"
      check goalExpr
      let mvar ← mkFreshExprSyntheticOpaqueMVar goalExpr
      let mvarId := mvar.mvarId!
      let mvarId ← mvarId.cleanup
      -- logInfo m!"Remaining goals: {goalsToMessageData [mvarId]}"
      try
        if rel = .eq then
          MVarId.refl mvarId
        else do
          Lean.Elab.Term.TermElabM.run' do
            match decrTactic? with
            | none =>
              let remainingGoals ← Tactic.run mvarId do
                Tactic.evalTactic (← `(tactic| decreasing_tactic))
              remainingGoals.forM fun mvarId => Term.reportUnsolvedGoals [mvarId]
              -- trace[Elab.definition.wf] "Found {rel} proof: {← instantiateMVars mvar}"
              pure ()
            | some decrTactic => Term.withoutErrToSorry do
              -- make info from `runTactic` available
              pushInfoTree (.hole mvarId)
              Term.runTactic mvarId decrTactic
              -- trace[Elab.definition.wf] "Found {rel} proof: {← instantiateMVars mvar}"
              pure ()
        trace[Elab.definition.wf] "inspectRecCall: success!"
        return rel
      catch _e =>
        -- proof failure is expected; fall through to the next (weaker) relation
        trace[Elab.definition.wf] "Did not find {rel} proof: {goalsToMessageData [mvarId]}"
        continue
    return .no_idea
/-- A cache for `evalRecCall`: memoizes one `GuessLexRel` per (parameter, argument) pair. -/
structure RecCallCache where mk'' ::
  decrTactic? : Option Syntax
  rcc : RecCallWithContext
  -- `cache[paramIdx][argIdx]` is `some rel` once `evalRecCall` has run for that pair
  cache : IO.Ref (Array (Array (Option GuessLexRel)))
/-- Create a cache to memoize calls to `evalRecCall decrTactic? rcc`,
with all entries initially unevaluated (`none`). -/
def RecCallCache.mk (decrTactic? : Option Syntax) (rcc : RecCallWithContext) :
    BaseIO RecCallCache := do
  let cache ← IO.mkRef <| Array.mkArray rcc.params.size (Array.mkArray rcc.args.size Option.none)
  return { decrTactic?, rcc, cache }
/-- Run `evalRecCall` and cache the result. -/
def RecCallCache.eval (rc : RecCallCache) (paramIdx argIdx : Nat) : MetaM GuessLexRel := do
  -- Check the cache first
  if let Option.some res := (← rc.cache.get)[paramIdx]![argIdx]! then
    return res
  else
    let res ← evalRecCall rc.decrTactic? rc.rcc paramIdx argIdx
    rc.cache.modify (·.modify paramIdx (·.set! argIdx res))
    return res
/-- Pretty-print the cache entries that have been evaluated so far. -/
def RecCallCache.pretty (rc : RecCallCache) : IO Format := do
  let mut r := Format.nil
  let d ← rc.cache.get
  for h₁ : paramIdx in [:d.size] do
    for h₂ : argIdx in [:(d[paramIdx]'h₁.2).size] do
      if let .some entry := (d[paramIdx]'h₁.2)[argIdx]'h₂.2 then
        r := r ++ f!"(Param {paramIdx}, arg {argIdx}): {entry}" ++ Format.line
  return r
/-- The measures that we order lexicographically can be comparing arguments,
or numbering the functions -/
inductive MutualMeasure where
  /-- For every function, the given argument index -/
  | args : Array Nat → MutualMeasure
  /-- The given function index is assigned 1, the rest 0 -/
  | func : Nat → MutualMeasure
/-- Evaluate a recursive call at a given `MutualMeasure`. -/
def inspectCall (rc : RecCallCache) : MutualMeasure → MetaM GuessLexRel
  | .args argIdxs => do
    -- per-function argument choice: look up which argument each side is measured by
    let paramIdx := argIdxs[rc.rcc.caller]!
    let argIdx := argIdxs[rc.rcc.callee]!
    rc.eval paramIdx argIdx
  | .func funIdx => do
    -- the chosen function gets measure 1, all others 0
    if rc.rcc.caller == funIdx && rc.rcc.callee != funIdx then
      return .lt
    if rc.rcc.caller != funIdx && rc.rcc.callee == funIdx then
      return .no_idea
    else
      return .eq
/--
Given a predefinition with value `fun (x_₁ ... xₙ) (y_₁ : α₁)... (yₘ : αₘ) => ...`,
where `n = fixedPrefixSize`, return an array `A` s.t. `i ∈ A` iff `sizeOf yᵢ` reduces to a literal.
This is the case for types such as `Prop`, `Type u`, etc.
These arguments should not be considered when guessing a well-founded relation.
See `generateCombinations?`
-/
def getForbiddenByTrivialSizeOf (fixedPrefixSize : Nat) (preDef : PreDefinition) : MetaM (Array Nat) :=
  lambdaTelescope preDef.value fun xs _ => do
    let mut result := #[]
    for x in xs[fixedPrefixSize:], i in [:xs.size] do
      try
        let sizeOf ← whnfD (← mkAppM ``sizeOf #[x])
        if sizeOf.isLit then
          result := result.push i
      catch _ =>
        -- if `sizeOf x` cannot even be elaborated, exclude the argument as well
        result := result.push i
    return result
/--
Generate all combinations of arguments, skipping those that are forbidden.
Sorts the uniform combinations ([0,0,0], [1,1,1]) to the front; they are commonly most useful to
try first, when the mutually recursive functions have similar argument structures.
Returns `none` (via `OptionT` failure) when more than `threshold` combinations accumulate.
-/
partial def generateCombinations? (forbiddenArgs : Array (Array Nat)) (numArgs : Array Nat)
    (threshold : Nat := 32) : Option (Array (Array Nat)) :=
  (do goUniform 0; go 0 #[]) |>.run #[] |>.2
where
  isForbidden (fidx : Nat) (argIdx : Nat) : Bool :=
    if h : fidx < forbiddenArgs.size then
      forbiddenArgs[fidx] |>.contains argIdx
    else
      false

  -- Enumerate all permissible uniform combinations
  goUniform (argIdx : Nat) : OptionT (StateM (Array (Array Nat))) Unit := do
    if numArgs.all (argIdx < ·) then
      unless forbiddenArgs.any (·.contains argIdx) do
        modify (·.push (Array.mkArray numArgs.size argIdx))
      goUniform (argIdx + 1)

  -- Enumerate all other permissible combinations
  go (fidx : Nat) : OptionT (ReaderT (Array Nat) (StateM (Array (Array Nat)))) Unit := do
    if h : fidx < numArgs.size then
      let n := numArgs[fidx]
      for argIdx in [:n] do
        unless isForbidden fidx argIdx do
          withReader (·.push argIdx) (go (fidx + 1))
    else
      let comb ← read
      -- uniform combinations were already pushed by `goUniform`; don't duplicate them
      unless comb.all (· == comb[0]!) do
        modify (·.push comb)
      if (← get).size > threshold then
        failure
/--
Enumerate all measures we want to try: All arguments (resp. combinations thereof) and
possible orderings of functions (if more than one)
-/
def generateMeasures (forbiddenArgs : Array (Array Nat)) (arities : Array Nat) :
    MetaM (Array MutualMeasure) := do
  let some arg_measures := generateCombinations? forbiddenArgs arities
    | throwError "Too many combinations"
  let func_measures :=
    if arities.size > 1 then
      (List.range arities.size).toArray
    else
      #[]
  return arg_measures.map .args ++ func_measures.map .func
/--
The core logic of guessing the lexicographic order.
Given a matrix that for each call and measure indicates whether that measure is
decreasing, equal, less-or-equal or unknown, it finds a sequence of measures
that is lexicographically decreasing.
The matrix is implemented here as an array of monadic query methods only so that
we can fill it lazily. Morally, this is a pure function.
-/
partial def solve {m} {α} [Monad m] (measures : Array α)
    (calls : Array (α → m GuessLexRel)) : m (Option (Array α)) := do
  go measures calls #[]
where
  go (measures : Array α) (calls : Array (α → m GuessLexRel)) (acc : Array α) := do
    if calls.isEmpty then return .some acc
    -- Find the first measure that has at least one < and otherwise only = or <=
    for h : measureIdx in [:measures.size] do
      let measure := measures[measureIdx]'h.2
      let mut has_lt := false
      let mut all_le := true
      let mut todo := #[]
      for call in calls do
        let entry ← call measure
        if entry = .lt then
          has_lt := true
        else
          -- calls not strictly decreased here must be handled by later measures
          todo := todo.push call
          if entry != .le && entry != .eq then
            all_le := false
            break
      -- No progress here? Try the next measure
      if not (has_lt && all_le) then continue
      -- We found a suitable measure, remove it from the list (mild optimization)
      let measures' := measures.eraseIdx measureIdx
      return ← go measures' todo (acc.push measure)
    -- None found, we have to give up
    return .none
/--
Create tuple syntax (`()` if the array is empty, and just the value if it's a singleton).
-/
def mkTupleSyntax : Array Term → MetaM Term
  | #[] => `(())
  | #[e] => return e
  | es => `(($(es[0]!), $(es[1:]),*))
/--
Given an array of `MutualMeasure`s, creates a `TerminationWF` that specifies the lexicographic
combination of these measures: for every function, a tuple of `sizeOf` applications
(for `.args` measures) and `0`/`1` literals (for `.func` measures).
-/
def buildTermWF (declNames : Array Name) (varNamess : Array (Array Name))
    (measures : Array MutualMeasure) : MetaM TerminationWF := do
  let mut termByElements := #[]
  for h : funIdx in [:varNamess.size] do
    let vars := (varNamess[funIdx]'h.2).map mkIdent
    let body ← mkTupleSyntax (← measures.mapM fun
      | .args varIdxs =>
        let v := vars.get! (varIdxs[funIdx]!)
        let sizeOfIdent := mkIdent ``sizeOf
        `($sizeOfIdent $v)
      | .func funIdx' => if funIdx' == funIdx then `(1) else `(0)
    )
    let declName := declNames[funIdx]!
    trace[Elab.definition.wf] "Using termination {declName}: {vars} => {body}"
    termByElements := termByElements.push
      { ref := .missing
        declName, vars, body,
        implicit := true
      }
  return .ext termByElements
end Lean.Elab.WF.GuessLex
namespace Lean.Elab.WF
open Lean.Elab.WF.GuessLex
/--
Main entry point of this module:
Try to find a lexicographic ordering of the arguments for which the recursive definition
terminates. See the module doc string for a high-level overview.
-/
def guessLex (preDefs : Array PreDefinition) (unaryPreDef : PreDefinition)
    (fixedPrefixSize : Nat) (decrTactic? : Option Syntax) :
    MetaM TerminationWF := do
  try
    let varNamess ← preDefs.mapM (naryVarNames fixedPrefixSize ·)
    let arities := varNamess.map (·.size)
    trace[Elab.definition.wf] "varNames is: {varNamess}"
    let forbiddenArgs ← preDefs.mapM fun preDef =>
      getForbiddenByTrivialSizeOf fixedPrefixSize preDef
    -- The list of measures, including the measures that order functions.
    -- The function ordering measures come last
    let measures ← generateMeasures forbiddenArgs arities
    -- If there is only one plausible measure, use that
    if let #[solution] := measures then
      return ← buildTermWF (preDefs.map (·.declName)) varNamess #[solution]
    -- Collect all recursive calls and extract their context
    let recCalls ← collectRecCalls unaryPreDef fixedPrefixSize arities
    let rcs ← recCalls.mapM (RecCallCache.mk decrTactic? ·)
    let callMatrix := rcs.map (inspectCall ·)
    match ← liftMetaM <| solve measures callMatrix with
    | .some solution => do
      let wf ← buildTermWF (preDefs.map (·.declName)) varNamess solution
      return wf
    | .none => throwError "Cannot find a decreasing lexicographic order"
  catch _ =>
    -- Hide all errors from guessing lexicographic orderings, as before
    -- Future work: explain the failure to the user, like Isabelle does
    throwError "failed to prove termination, use `termination_by` to specify a well-founded relation"

View File

@@ -7,12 +7,10 @@ import Lean.Elab.PreDefinition.Basic
import Lean.Elab.PreDefinition.WF.TerminationHint
import Lean.Elab.PreDefinition.WF.PackDomain
import Lean.Elab.PreDefinition.WF.PackMutual
import Lean.Elab.PreDefinition.WF.Preprocess
import Lean.Elab.PreDefinition.WF.Rel
import Lean.Elab.PreDefinition.WF.Fix
import Lean.Elab.PreDefinition.WF.Eqns
import Lean.Elab.PreDefinition.WF.Ite
import Lean.Elab.PreDefinition.WF.GuessLex
namespace Lean.Elab
open WF
@@ -81,7 +79,6 @@ private def isOnlyOneUnaryDef (preDefs : Array PreDefinition) (fixedPrefixSize :
return false
def wfRecursion (preDefs : Array PreDefinition) (wf? : Option TerminationWF) (decrTactic? : Option Syntax) : TermElabM Unit := do
let preDefs preDefs.mapM fun preDef => return { preDef with value := ( preprocess preDef.value) }
let (unaryPreDef, fixedPrefixSize) withoutModifyingEnv do
for preDef in preDefs do
addAsAxiom preDef
@@ -90,17 +87,10 @@ def wfRecursion (preDefs : Array PreDefinition) (wf? : Option TerminationWF) (de
let preDefsDIte preDefs.mapM fun preDef => return { preDef with value := ( iteToDIte preDef.value) }
let unaryPreDefs packDomain fixedPrefixSize preDefsDIte
return ( packMutual fixedPrefixSize preDefs unaryPreDefs, fixedPrefixSize)
let wf
if let .some wf := wf? then
pure wf
else
guessLex preDefs unaryPreDef fixedPrefixSize decrTactic?
let preDefNonRec forallBoundedTelescope unaryPreDef.type fixedPrefixSize fun prefixArgs type => do
let type whnfForall type
let packedArgType := type.bindingDomain!
elabWFRel preDefs unaryPreDef.declName fixedPrefixSize packedArgType wf fun wfRel => do
elabWFRel preDefs unaryPreDef.declName fixedPrefixSize packedArgType wf? fun wfRel => do
trace[Elab.definition.wf] "wfRel: {wfRel}"
let (value, envNew) withoutModifyingEnv' do
addAsAxiom unaryPreDef
@@ -117,13 +107,10 @@ def wfRecursion (preDefs : Array PreDefinition) (wf? : Option TerminationWF) (de
withEnableInfoTree false do
addNonRec preDefNonRec (applyAttrAfterCompilation := false)
addNonRecPreDefs preDefs preDefNonRec fixedPrefixSize
-- We create the `_unsafe_rec` before we abstract nested proofs.
-- Reason: the nested proofs may be referring to the _unsafe_rec.
addAndCompilePartialRec preDefs
let preDefs preDefs.mapM (abstractNestedProofs ·)
registerEqnsInfo preDefs preDefNonRec.declName fixedPrefixSize
for preDef in preDefs do
applyAttributesOf #[preDef] AttributeApplicationTime.afterCompilation
addAndCompilePartialRec preDefs
builtin_initialize registerTraceClass `Elab.definition.wf

View File

@@ -124,8 +124,7 @@ where
let args := e.getAppArgs
let fNew := mkConst preDefsNew[funIdx]!.declName f.constLevels!
let fNew := mkAppN fNew args[:fixedPrefix]
let Expr.forallE _ d .. whnf ( inferType fNew) | unreachable!
-- NB: Use whnf in case the type is not a manifest forall, but a definition around it
let Expr.forallE _ d .. inferType fNew | unreachable!
let argNew mkUnaryArg d args[fixedPrefix:]
return mkApp fNew argNew
let rec

View File

@@ -51,13 +51,13 @@ private partial def mkNewCoDomain (preDefsOriginal : Array PreDefinition) (preDe
let casesOn := mkAppN casesOn xTypeArgs -- parameters
let casesOn := mkApp casesOn ( mkLambdaFVars #[x] (mkSort u)) -- motive
let casesOn := mkApp casesOn x -- major
let minor1 withLocalDeclD ( mkFreshUserName `_x) xTypeArgs[0]! fun x => do
mkLambdaFVars #[x] (( whnf preDefTypes[i]!).bindingBody!.instantiate1 x)
let minor1 withLocalDeclD ( mkFreshUserName `_x) xTypeArgs[0]! fun x =>
mkLambdaFVars #[x] (preDefTypes[i]!.bindingBody!.instantiate1 x)
let minor2 withLocalDeclD ( mkFreshUserName `_x) xTypeArgs[1]! fun x => do
mkLambdaFVars #[x] ( go x (i+1))
return mkApp2 casesOn minor1 minor2
else
return ( whnf preDefTypes[i]!).bindingBody!.instantiate1 x
return preDefTypes[i]!.bindingBody!.instantiate1 x
go x 0
/--
@@ -90,52 +90,34 @@ private partial def packValues (x : Expr) (codomain : Expr) (preDefValues : Arra
go mvar.mvarId! x.fvarId! 0
instantiateMVars mvar
/--
Pass the first `n` arguments of `e` to the continuation, and apply the result to the
remaining arguments. If `e` does not have enough arguments, it is eta-expanded as needed.
Unlike `Meta.etaExpand` does not use `withDefault`.
-/
def withAppN (n : Nat) (e : Expr) (k : Array Expr MetaM Expr) : MetaM Expr := do
let args := e.getAppArgs
if n args.size then
let e' k args[:n]
return mkAppN e' args[n:]
else
let missing := n - args.size
forallBoundedTelescope ( inferType e) missing fun xs _ => do
if xs.size < missing then
throwError "Failed to eta-expand partial application"
let e' k (args ++ xs)
mkLambdaFVars xs e'
/--
Auxiliary function for replacing nested `preDefs` recursive calls in `e` with the new function `newFn`.
See: `packMutual`
-/
private partial def post (fixedPrefix : Nat) (preDefs : Array PreDefinition) (domain : Expr) (newFn : Name) (e : Expr) : MetaM TransformStep := do
if e.getAppNumArgs != fixedPrefix + 1 then
return TransformStep.done e
let f := e.getAppFn
if !f.isConst then
return TransformStep.done e
let declName := f.constName!
let us := f.constLevels!
if let some fidx := preDefs.findIdx? (·.declName == declName) then
let e' withAppN (fixedPrefix + 1) e fun args => do
let fixedArgs := args[:fixedPrefix]
let arg := args[fixedPrefix]!
let rec mkNewArg (i : Nat) (type : Expr) : MetaM Expr := do
if i == preDefs.size - 1 then
return arg
else
( whnfD type).withApp fun f args => do
assert! args.size == 2
if i == fidx then
return mkApp3 (mkConst ``PSum.inl f.constLevels!) args[0]! args[1]! arg
else
let r mkNewArg (i+1) args[1]!
return mkApp3 (mkConst ``PSum.inr f.constLevels!) args[0]! args[1]! r
return mkApp (mkAppN (mkConst newFn us) fixedArgs) ( mkNewArg 0 domain)
return TransformStep.done e'
let args := e.getAppArgs
let fixedArgs := args[:fixedPrefix]
let arg := args.back
let rec mkNewArg (i : Nat) (type : Expr) : MetaM Expr := do
if i == preDefs.size - 1 then
return arg
else
( whnfD type).withApp fun f args => do
assert! args.size == 2
if i == fidx then
return mkApp3 (mkConst ``PSum.inl f.constLevels!) args[0]! args[1]! arg
else
let r mkNewArg (i+1) args[1]!
return mkApp3 (mkConst ``PSum.inr f.constLevels!) args[0]! args[1]! r
return TransformStep.done <| mkApp (mkAppN (mkConst newFn us) fixedArgs) ( mkNewArg 0 domain)
return TransformStep.done e
partial def withFixedPrefix (fixedPrefix : Nat) (preDefs : Array PreDefinition) (k : Array Expr Array Expr Array Expr MetaM α) : MetaM α :=
@@ -192,7 +174,7 @@ where
def packMutual (fixedPrefix : Nat) (preDefsOriginal : Array PreDefinition) (preDefs : Array PreDefinition) : MetaM PreDefinition := do
if preDefs.size == 1 then return preDefs[0]!
withFixedPrefix fixedPrefix preDefs fun ys types vals => do
let domains types.mapM fun type => do pure ( whnf type).bindingDomain!
let domains := types.map fun type => type.bindingDomain!
let domain mkNewDomain domains
withLocalDeclD ( mkFreshUserName `_x) domain fun x => do
let codomain mkNewCoDomain preDefsOriginal types x
@@ -201,7 +183,7 @@ def packMutual (fixedPrefix : Nat) (preDefsOriginal : Array PreDefinition) (preD
let newFn := preDefs[0]!.declName ++ `_mutual
let preDefNew := { preDefs[0]! with declName := newFn, type, value }
addAsAxiom preDefNew
let value transform value (skipConstInApp := true) (post := post fixedPrefix preDefs domain newFn)
let value transform value (post := post fixedPrefix preDefs domain newFn)
let value mkLambdaFVars (ys.push x) value
return { preDefNew with value }

View File

@@ -1,37 +0,0 @@
/-
Copyright (c) 2021 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
-/
import Lean.Meta.Transform
import Lean.Elab.RecAppSyntax
namespace Lean.Elab.WF
open Meta
/--
Preprocesses the expressions to improve the effectiveness of `wfRecursion`.
* Floats out the RecApp markers.
Example:
```
def f : Nat → Nat
| 0 => 1
| i+1 => (f x) i
```
Unlike `Lean.Elab.Structural.preprocess`, do _not_ beta-reduce, as it could
remove `let_fun`-lambdas that contain explicit termination proofs.
-/
def preprocess (e : Expr) : CoreM Expr :=
Core.transform e
(post := fun e =>
match e with
| .app (.mdata m f) a =>
if m.isRecApp then
return .done (.mdata m (.app f a))
else
return .done e
| _ => return .done e)
end Lean.Elab.WF

View File

@@ -53,21 +53,67 @@ private partial def unpackUnary (preDef : PreDefinition) (prefixSize : Nat) (mva
mvarId.rename fvarId varNames.back
go 0 mvarId fvarId
def elabWFRel (preDefs : Array PreDefinition) (unaryPreDefName : Name) (fixedPrefixSize : Nat)
(argType : Expr) (wf : TerminationWF) (k : Expr TermElabM α) : TermElabM α := do
def getNumCandidateArgs (fixedPrefixSize : Nat) (preDefs : Array PreDefinition) : MetaM (Array Nat) := do
preDefs.mapM fun preDef =>
lambdaTelescope preDef.value fun xs _ =>
return xs.size - fixedPrefixSize
/--
Given a predefinition with value `fun (x_₁ ... xₙ) (y_₁ : α₁)... (yₘ : αₘ) => ...`,
where `n = fixedPrefixSize`, return an array `A` s.t. `i ∈ A` iff `sizeOf yᵢ` reduces to a literal.
This is the case for types such as `Prop`, `Type u`, etc.
These arguments should not be considered when guessing a well-founded relation.
See `generateCombinations?`
-/
def getForbiddenByTrivialSizeOf (fixedPrefixSize : Nat) (preDef : PreDefinition) : MetaM (Array Nat) :=
lambdaTelescope preDef.value fun xs _ => do
let mut result := #[]
for x in xs[fixedPrefixSize:], i in [:xs.size] do
try
let sizeOf whnfD ( mkAppM ``sizeOf #[x])
if sizeOf.isLit then
result := result.push i
catch _ =>
result := result.push i
return result
def generateCombinations? (forbiddenArgs : Array (Array Nat)) (numArgs : Array Nat) (threshold : Nat := 32) : Option (Array (Array Nat)) :=
go 0 #[] |>.run #[] |>.2
where
isForbidden (fidx : Nat) (argIdx : Nat) : Bool :=
if h : fidx < forbiddenArgs.size then
forbiddenArgs.get fidx, h |>.contains argIdx
else
false
go (fidx : Nat) : OptionT (ReaderT (Array Nat) (StateM (Array (Array Nat)))) Unit := do
if h : fidx < numArgs.size then
let n := numArgs.get fidx, h
for argIdx in [:n] do
unless isForbidden fidx argIdx do
withReader (·.push argIdx) (go (fidx + 1))
else
modify (·.push ( read))
if ( get).size > threshold then
failure
termination_by _ fidx => numArgs.size - fidx
def elabWFRel (preDefs : Array PreDefinition) (unaryPreDefName : Name) (fixedPrefixSize : Nat) (argType : Expr) (wf? : Option TerminationWF) (k : Expr TermElabM α) : TermElabM α := do
let α := argType
let u getLevel α
let expectedType := mkApp (mkConst ``WellFoundedRelation [u]) α
trace[Elab.definition.wf] "elabWFRel start: {(← mkFreshTypeMVar).mvarId!}"
withDeclName unaryPreDefName do
match wf with
| TerminationWF.core wfStx =>
match wf? with
| some (TerminationWF.core wfStx) => withDeclName unaryPreDefName do
let wfRel instantiateMVars ( withSynthesize <| elabTermEnsuringType wfStx expectedType)
let pendingMVarIds getMVars wfRel
discard <| logUnassignedUsingErrorInfos pendingMVarIds
k wfRel
| TerminationWF.ext elements =>
withRef (getRefFromElems elements) do
| some (TerminationWF.ext elements) => go expectedType elements
| none => guess expectedType
where
go (expectedType : Expr) (elements : Array TerminationByElement) : TermElabM α :=
withDeclName unaryPreDefName <| withRef (getRefFromElems elements) do
let mainMVarId := ( mkFreshExprSyntheticOpaqueMVar expectedType).mvarId!
let [fMVarId, wfRelMVarId, _] mainMVarId.apply ( mkConstWithFreshMVarLevels ``invImage) | throwError "failed to apply 'invImage'"
let (d, fMVarId) fMVarId.intro1
@@ -81,4 +127,38 @@ def elabWFRel (preDefs : Array PreDefinition) (unaryPreDefName : Name) (fixedPre
wfRelMVarId.assign wfRelVal
k ( instantiateMVars (mkMVar mainMVarId))
generateElements (numArgs : Array Nat) (argCombination : Array Nat) : TermElabM (Array TerminationByElement) := do
let mut result := #[]
let var `(x)
let hole `(_)
for preDef in preDefs, numArg in numArgs, argIdx in argCombination, i in [:preDefs.size] do
let mut vars := #[var]
for _ in [:numArg - argIdx - 1] do
vars := vars.push hole
-- TODO: improve this.
-- The following trick allows a function `f` in a mutual block to invoke `g` appearing before it with the input argument.
-- We should compute the "right" order (if there is one) in the future.
let body if preDefs.size > 1 then `((sizeOf x, $(quote i))) else `(sizeOf x)
result := result.push {
ref := preDef.ref
declName := preDef.declName
vars := vars
body := body
implicit := false
}
return result
guess (expectedType : Expr) : TermElabM α := do
-- TODO: add support for lex
let numArgs getNumCandidateArgs fixedPrefixSize preDefs
-- TODO: include in `forbiddenArgs` arguments that are fixed but are not in the fixed prefix
let forbiddenArgs preDefs.mapM fun preDef => getForbiddenByTrivialSizeOf fixedPrefixSize preDef
-- TODO: add option to control the maximum number of cases to try
if let some combs := generateCombinations? forbiddenArgs numArgs then
for comb in combs do
let elements generateElements numArgs comb
if let some r observing? (go expectedType elements) then
return r
throwError "failed to prove termination, use `termination_by` to specify a well-founded relation"
end Lean.Elab.WF

View File

@@ -53,6 +53,7 @@ def TerminationHint.markAsUsed (t : TerminationHint) (clique : Array Name) : Ter
| TerminationHint.many m => Id.run do
for declName in clique do
if m.contains declName then
let m := m.erase declName
let m := m.erase declName
if m.isEmpty then
return TerminationHint.none

View File

@@ -27,10 +27,4 @@ def getRecAppSyntax? (e : Expr) : Option Syntax :=
| _ => none
| _ => none
/--
Checks if the `MData` is for a recursive application.
-/
def MData.isRecApp (d : MData) : Bool :=
d.contains recAppKey
end Lean

View File

@@ -101,9 +101,8 @@ leading_parser try (declModifiers >> ident >> " :: ")
private def expandCtor (structStx : Syntax) (structModifiers : Modifiers) (structDeclName : Name) : TermElabM StructCtorView := do
let useDefault := do
let declName := structDeclName ++ defaultCtorName
let ref := structStx[1].mkSynthetic
addAuxDeclarationRanges declName ref ref
pure { ref, modifiers := {}, name := defaultCtorName, declName }
addAuxDeclarationRanges declName structStx[2] structStx[2]
pure { ref := structStx, modifiers := {}, name := defaultCtorName, declName }
if structStx[5].isNone then
useDefault
else
@@ -124,7 +123,7 @@ private def expandCtor (structStx : Syntax) (structModifiers : Modifiers) (struc
let declName applyVisibility ctorModifiers.visibility declName
addDocString' declName ctorModifiers.docString?
addAuxDeclarationRanges declName ctor[1] ctor[1]
pure { ref := ctor[1], name, modifiers := ctorModifiers, declName }
pure { ref := ctor, name, modifiers := ctorModifiers, declName }
def checkValidFieldModifier (modifiers : Modifiers) : TermElabM Unit := do
if modifiers.isNoncomputable then
@@ -411,15 +410,14 @@ where
| none =>
let some fieldInfo := getFieldInfo? ( getEnv) parentStructName fieldName | unreachable!
let addNewField : TermElabM α := do
let value? copyDefaultValue? fieldMap expandedStructNames parentStructName fieldName
withLocalDecl fieldName fieldInfo.binderInfo fieldType fun fieldFVar => do
let fieldMap := fieldMap.insert fieldName fieldFVar
let value? copyDefaultValue? fieldMap expandedStructNames parentStructName fieldName
let fieldDeclName := structDeclName ++ fieldName
let fieldDeclName applyVisibility ( toVisibility fieldInfo) fieldDeclName
addDocString' fieldDeclName ( findDocString? ( getEnv) fieldInfo.projFn)
let infos := infos.push { name := fieldName, declName := fieldDeclName, fvar := fieldFVar, value?,
kind := StructFieldKind.copiedField }
copy (i+1) infos fieldMap expandedStructNames
copy (i+1) infos (fieldMap.insert fieldName fieldFVar) expandedStructNames
if fieldInfo.subobject?.isSome then
let fieldParentStructName getStructureName fieldType
if ( findExistingField? infos fieldParentStructName).isSome then
@@ -841,8 +839,8 @@ private def elabStructureView (view : StructView) : TermElabM Unit := do
pure (info.isSubobject && decl.binderInfo.isInstImplicit)
withSaveInfoContext do -- save new env
Term.addLocalVarInfo view.ref[1] ( mkConstWithLevelParams view.declName)
if let some _ := view.ctor.ref.getPos? (canonicalOnly := true) then
Term.addTermInfo' view.ctor.ref ( mkConstWithLevelParams view.ctor.declName) (isBinder := true)
if let some _ := view.ctor.ref[1].getPos? (canonicalOnly := true) then
Term.addTermInfo' view.ctor.ref[1] ( mkConstWithLevelParams view.ctor.declName) (isBinder := true)
for field in view.fields do
-- may not exist if overriding inherited field
if ( getEnv).contains field.declName then

View File

@@ -231,7 +231,7 @@ private def reportStuckSyntheticMVars (ignoreStuckTC := false) : TermElabM Unit
for mvarId in pendingMVars do
reportStuckSyntheticMVar mvarId ignoreStuckTC
private def getSomeSyntheticMVarsRef : TermElabM Syntax := do
private def getSomeSynthethicMVarsRef : TermElabM Syntax := do
for mvarId in ( get).pendingMVars do
if let some decl getSyntheticMVarDecl? mvarId then
if decl.stx.getPos?.isSome then
@@ -395,7 +395,7 @@ mutual
-/
partial def synthesizeSyntheticMVars (mayPostpone := true) (ignoreStuckTC := false) : TermElabM Unit := do
let rec loop (_ : Unit) : TermElabM Unit := do
withRef ( getSomeSyntheticMVarsRef) <| withIncRecDepth do
withRef ( getSomeSynthethicMVarsRef) <| withIncRecDepth do
unless ( get).pendingMVars.isEmpty do
if synthesizeSyntheticMVarsStep (postponeOnError := false) (runTactics := false) then
loop ()

View File

@@ -232,15 +232,12 @@ partial def evalChoiceAux (tactics : Array Syntax) (i : Nat) : TacticM Unit :=
| some msg => withRef stx[0] <| addRawTrace msg
@[builtin_tactic Lean.Parser.Tactic.assumption] def evalAssumption : Tactic := fun _ =>
-- The `withAssignableSyntheticOpaque` is needed here to accommodate
-- `assumption` after `refine`.
-- See https://github.com/leanprover/lean4/issues/2361
liftMetaTactic fun mvarId => withAssignableSyntheticOpaque do mvarId.assumption; pure []
liftMetaTactic fun mvarId => do mvarId.assumption; pure []
@[builtin_tactic Lean.Parser.Tactic.contradiction] def evalContradiction : Tactic := fun _ =>
liftMetaTactic fun mvarId => do mvarId.contradiction; pure []
@[builtin_tactic Lean.Parser.Tactic.eqRefl] def evalRefl : Tactic := fun _ =>
@[builtin_tactic Lean.Parser.Tactic.refl] def evalRefl : Tactic := fun _ =>
liftMetaTactic fun mvarId => do mvarId.refl; pure []
@[builtin_tactic Lean.Parser.Tactic.intro] def evalIntro : Tactic := fun stx => do

View File

@@ -92,7 +92,7 @@ def sortMVarIdsByIndex [MonadMCtx m] [Monad m] (mvarIds : List MVarId) : m (List
def withCollectingNewGoalsFrom (k : TacticM Expr) (tagSuffix : Name) (allowNaturalHoles := false) : TacticM (Expr × List MVarId) :=
/-
When `allowNaturalHoles = true`, unassigned holes should become new metavariables, including `_`s.
Thus, we set `holesAsSyntheticOpaque` to true if it is not already set to `true`.
Thus, we set `holesAsSynthethicOpaque` to true if it is not already set to `true`.
See issue #1681. We have the tactic
```
`refine' (fun x => _)
@@ -246,7 +246,7 @@ def elabTermForApply (stx : Syntax) (mayPostpone := true) : TacticM Expr := do
By disabling "error to sorry", we also limit ourselves to at most one error at `t[h']`.
By disabling "error to sorry", we also miss the opportunity to catch mistakes in tactic code such as
By disabling "error to sorry", we also miss the opportunity to catch mistakes is tactic code such as
`first | apply nonsensical-term | assumption`
This should not be a big problem for the `apply` tactic since we usually provide small terms there.
@@ -317,7 +317,7 @@ def evalApplyLikeTactic (tac : MVarId → Expr → MetaM (List MVarId)) (e : Syn
withTransparency TransparencyMode.all <| evalTactic stx[1]
/--
Elaborate `stx`. If it is a free variable, return it. Otherwise, assert it, and return the free variable.
Elaborate `stx`. If it a free variable, return it. Otherwise, assert it, and return the free variable.
Note that, the main goal is updated when `Meta.assert` is used in the second case. -/
def elabAsFVar (stx : Syntax) (userName? : Option Name := none) : TacticM FVarId :=
withMainContext do

View File

@@ -264,6 +264,7 @@ def reorderAlts (alts : Array Alt) (altsSyntax : Array Syntax) : Array Alt := Id
def evalAlts (elimInfo : ElimInfo) (alts : Array Alt) (optPreTac : Syntax) (altsSyntax : Array Syntax)
(initialInfo : Info)
(numEqs : Nat := 0) (numGeneralized : Nat := 0) (toClear : Array FVarId := #[]) : TacticM Unit := do
checkAltNames alts altsSyntax
let hasAlts := altsSyntax.size > 0
if hasAlts then
-- default to initial state outside of alts
@@ -275,7 +276,6 @@ def evalAlts (elimInfo : ElimInfo) (alts : Array Alt) (optPreTac : Syntax) (alts
else go
where
go := do
checkAltNames alts altsSyntax
let alts := reorderAlts alts altsSyntax
let hasAlts := altsSyntax.size > 0
let mut usedWildcard := false

View File

@@ -39,16 +39,9 @@ def expandOptLocation (stx : Syntax) : Location :=
open Meta
/--
Runs the given `atLocal` and `atTarget` methods on each of the locations selected by the given `loc`.
* If `loc` is a list of locations, runs at each specified hypothesis (and finally the goal if `⊢` is included),
and fails if any of the tactic applications fail.
* If `loc` is `*`, runs at the target first and then the hypotheses in reverse order.
If all tactic applications fail, `withLocation` with call `failed` with the main goal mvar.
If the tactic application closes the main goal, `withLocation` will then fail with `no goals to be solved`
upon reaching the first hypothesis.
-/
/-- Runs the given `atLocal` and `atTarget` methods on each of the locations selected by the given `loc`.
If any of the selected tactic applications fail, it will call `failed` with the main goal mvar.
-/
def withLocation (loc : Location) (atLocal : FVarId TacticM Unit) (atTarget : TacticM Unit) (failed : MVarId TacticM Unit) : TacticM Unit := do
match loc with
| Location.targets hyps type =>

View File

@@ -19,15 +19,13 @@ def rewriteTarget (stx : Syntax) (symm : Bool) (config : Rewrite.Config := {}) :
replaceMainGoal (mvarId' :: r.mvarIds)
def rewriteLocalDecl (stx : Syntax) (symm : Bool) (fvarId : FVarId) (config : Rewrite.Config := {}) :
TacticM Unit := withMainContext do
-- Note: we cannot execute `replaceLocalDecl` inside `Term.withSynthesize`.
-- See issues #2711 and #2727.
let rwResult Term.withSynthesize <| withMainContext do
TacticM Unit := do
Term.withSynthesize <| withMainContext do
let e elabTerm stx none true
let localDecl fvarId.getDecl
( getMainGoal).rewrite localDecl.type e symm (config := config)
let replaceResult ( getMainGoal).replaceLocalDecl fvarId rwResult.eNew rwResult.eqProof
replaceMainGoal (replaceResult.mvarId :: rwResult.mvarIds)
let rwResult ( getMainGoal).rewrite localDecl.type e symm (config := config)
let replaceResult ( getMainGoal).replaceLocalDecl fvarId rwResult.eNew rwResult.eqProof
replaceMainGoal (replaceResult.mvarId :: rwResult.mvarIds)
def withRWRulesSeq (token : Syntax) (rwRulesSeqStx : Syntax) (x : (symm : Bool) (term : Syntax) TacticM Unit) : TacticM Unit := do
let lbrak := rwRulesSeqStx[0]
@@ -45,18 +43,14 @@ def withRWRulesSeq (token : Syntax) (rwRulesSeqStx : Syntax) (x : (symm : Bool)
let symm := !rule[0].isNone
let term := rule[1]
let processId (id : Syntax) : TacticM Unit := do
-- See if we can interpret `id` as a hypothesis first.
if ( optional <| getFVarId id).isSome then
x symm term
else
-- Try to get equation theorems for `id`.
let declName try resolveGlobalConstNoOverload id catch _ => return ( x symm term)
let some eqThms getEqnsFor? declName (nonRec := true) | x symm term
let rec go : List Name TacticM Unit
| [] => throwError "failed to rewrite using equation theorems for '{declName}'"
| eqThm::eqThms => (x symm (mkIdentFrom id eqThm)) <|> go eqThms
go eqThms.toList
discard <| Term.addTermInfo id ( mkConstWithFreshMVarLevels declName) (lctx? := getLCtx)
-- Try to get equation theorems for `id` first
let declName try resolveGlobalConstNoOverload id catch _ => return ( x symm term)
let some eqThms getEqnsFor? declName (nonRec := true) | x symm term
let rec go : List Name TacticM Unit
| [] => throwError "failed to rewrite using equation theorems for '{declName}'"
| eqThm::eqThms => (x symm (mkIdentFrom id eqThm)) <|> go eqThms
go eqThms.toList
discard <| Term.addTermInfo id ( mkConstWithFreshMVarLevels declName) (lctx? := getLCtx)
match term with
| `($id:ident) => processId id
| `(@$id:ident) => processId id

View File

@@ -101,17 +101,6 @@ private def addDeclToUnfoldOrTheorem (thms : Meta.SimpTheorems) (id : Origin) (e
return thms.addDeclToUnfoldCore declName
else
thms.addDeclToUnfold declName
else if e.isFVar then
let fvarId := e.fvarId!
let decl fvarId.getDecl
if ( isProp decl.type) then
thms.add id #[] e (post := post) (inv := inv)
else if !decl.isLet then
throwError "invalid argument, variable is not a proposition or let-declaration"
else if inv then
throwError "invalid '←' modifier, '{e}' is a let-declaration name to be unfolded"
else
return thms.addLetDeclToUnfold fvarId
else
thms.add id #[] e (post := post) (inv := inv)
@@ -248,10 +237,6 @@ def mkSimpContext (stx : Syntax) (eraseLocal : Bool) (kind := SimpKind.simp) (ig
else
let ctx := r.ctx
let mut simpTheorems := ctx.simpTheorems
/-
When using `zeta := false`, we do not expand let-declarations when using `[*]`.
Users must explicitly include it in the list.
-/
let hs getPropHyps
for h in hs do
unless simpTheorems.isErased (.fvar h) do
@@ -283,7 +268,7 @@ def traceSimpCall (stx : Syntax) (usedSimps : UsedSimps) : MetaM Unit := do
| .fvar fvarId => -- local hypotheses in the context
if let some ldecl := lctx.find? fvarId then
localsOrStar := localsOrStar.bind fun locals =>
if !ldecl.userName.isInaccessibleUserName && !ldecl.userName.hasMacroScopes &&
if !ldecl.userName.isInaccessibleUserName &&
(lctx.findFromUserName? ldecl.userName).get!.fvarId == ldecl.fvarId then
some (locals.push ldecl.userName)
else
@@ -337,14 +322,14 @@ where
/-
"simp " (config)? (discharger)? ("only ")? ("[" simpLemma,* "]")? (location)?
-/
@[builtin_tactic Lean.Parser.Tactic.simp] def evalSimp : Tactic := fun stx => withMainContext do
let { ctx, dischargeWrapper } mkSimpContext stx (eraseLocal := false)
@[builtin_tactic Lean.Parser.Tactic.simp] def evalSimp : Tactic := fun stx => do
let { ctx, dischargeWrapper } withMainContext <| mkSimpContext stx (eraseLocal := false)
let usedSimps dischargeWrapper.with fun discharge? =>
simpLocation ctx discharge? (expandOptLocation stx[5])
if tactic.simp.trace.get ( getOptions) then
traceSimpCall stx usedSimps
@[builtin_tactic Lean.Parser.Tactic.simpAll] def evalSimpAll : Tactic := fun stx => withMainContext do
@[builtin_tactic Lean.Parser.Tactic.simpAll] def evalSimpAll : Tactic := fun stx => do
let { ctx, .. } mkSimpContext stx (eraseLocal := true) (kind := .simpAll) (ignoreStarArg := true)
let (result?, usedSimps) simpAll ( getMainGoal) ctx
match result? with
@@ -370,7 +355,7 @@ where
| none => replaceMainGoal []
| some mvarId => replaceMainGoal [mvarId]
if tactic.simp.trace.get ( getOptions) then
mvarId.withContext <| traceSimpCall ( getRef) usedSimps
traceSimpCall ( getRef) usedSimps
@[builtin_tactic Lean.Parser.Tactic.dsimp] def evalDSimp : Tactic := fun stx => do
let { ctx, .. } withMainContext <| mkSimpContext stx (eraseLocal := false) (kind := .dsimp)

View File

@@ -211,7 +211,7 @@ structure Context where
saveRecAppSyntax : Bool := true
/--
If `holesAsSyntheticOpaque` is `true`, then we mark metavariables associated
with `_`s as `syntheticOpaque` if they do not occur in patterns.
with `_`s as `synthethicOpaque` if they do not occur in patterns.
This option is useful when elaborating terms in tactics such as `refine'` where
we want holes there to become new goals. See issue #1681, we have
`refine' (fun x => _)

View File

@@ -50,7 +50,7 @@ def getBetterRef (ref : Syntax) (macroStack : MacroStack) : Syntax :=
register_builtin_option pp.macroStack : Bool := {
defValue := false
group := "pp"
descr := "display macro expansion stack"
descr := "dispaly macro expansion stack"
}
def addMacroStack {m} [Monad m] [MonadOptions m] (msgData : MessageData) (macroStack : MacroStack) : m MessageData := do

View File

@@ -255,20 +255,20 @@ structure EnvExtensionInterface where
ext : Type Type
inhabitedExt : Inhabited σ Inhabited (ext σ)
registerExt (mkInitial : IO σ) : IO (ext σ)
setState (e : ext σ) (exts : Array EnvExtensionState) : σ Array EnvExtensionState
modifyState (e : ext σ) (exts : Array EnvExtensionState) : (σ σ) Array EnvExtensionState
getState [Inhabited σ] (e : ext σ) (exts : Array EnvExtensionState) : σ
setState (e : ext σ) (env : Environment) : σ Environment
modifyState (e : ext σ) (env : Environment) : (σ σ) Environment
getState [Inhabited σ] (e : ext σ) (env : Environment) : σ
mkInitialExtStates : IO (Array EnvExtensionState)
ensureExtensionsSize : Array EnvExtensionState IO (Array EnvExtensionState)
ensureExtensionsSize : Environment IO Environment
instance : Inhabited EnvExtensionInterface where
default := {
ext := id
inhabitedExt := id
ensureExtensionsSize := fun exts => pure exts
ensureExtensionsSize := fun env => pure env
registerExt := fun mk => mk
setState := fun _ exts _ => exts
modifyState := fun _ exts _ => exts
setState := fun _ env _ => env
modifyState := fun _ env _ => env
getState := fun ext _ => ext
mkInitialExtStates := pure #[]
}
@@ -290,40 +290,41 @@ private builtin_initialize envExtensionsRef : IO.Ref (Array (Ext EnvExtensionSta
user-defined environment extensions. When this happens, we must adjust the size of the `env.extensions`.
This method is invoked when processing `import`s.
-/
partial def ensureExtensionsArraySize (exts : Array EnvExtensionState) : IO (Array EnvExtensionState) := do
loop exts.size exts
partial def ensureExtensionsArraySize (env : Environment) : IO Environment := do
loop env.extensions.size env
where
loop (i : Nat) (exts : Array EnvExtensionState) : IO (Array EnvExtensionState) := do
loop (i : Nat) (env : Environment) : IO Environment := do
let envExtensions envExtensionsRef.get
if i < envExtensions.size then
let s envExtensions[i]!.mkInitial
let exts := exts.push s
loop (i + 1) exts
let env := { env with extensions := env.extensions.push s }
loop (i + 1) env
else
return exts
return env
private def invalidExtMsg := "invalid environment extension has been accessed"
unsafe def setState {σ} (ext : Ext σ) (exts : Array EnvExtensionState) (s : σ) : Array EnvExtensionState :=
if h : ext.idx < exts.size then
exts.set ext.idx, h (unsafeCast s)
unsafe def setState {σ} (ext : Ext σ) (env : Environment) (s : σ) : Environment :=
if h : ext.idx < env.extensions.size then
{ env with extensions := env.extensions.set ext.idx, h (unsafeCast s) }
else
have : Inhabited (Array EnvExtensionState) := exts
have : Inhabited Environment := env
panic! invalidExtMsg
@[inline] unsafe def modifyState {σ : Type} (ext : Ext σ) (exts : Array EnvExtensionState) (f : σ σ) : Array EnvExtensionState :=
if ext.idx < exts.size then
exts.modify ext.idx fun s =>
let s : σ := unsafeCast s
let s : σ := f s
unsafeCast s
@[inline] unsafe def modifyState {σ : Type} (ext : Ext σ) (env : Environment) (f : σ σ) : Environment :=
if ext.idx < env.extensions.size then
{ env with
extensions := env.extensions.modify ext.idx fun s =>
let s : σ := unsafeCast s
let s : σ := f s
unsafeCast s }
else
have : Inhabited (Array EnvExtensionState) := exts
have : Inhabited Environment := env
panic! invalidExtMsg
unsafe def getState {σ} [Inhabited σ] (ext : Ext σ) (exts : Array EnvExtensionState) : σ :=
if h : ext.idx < exts.size then
let s : EnvExtensionState := exts.get ext.idx, h
unsafe def getState {σ} [Inhabited σ] (ext : Ext σ) (env : Environment) : σ :=
if h : ext.idx < env.extensions.size then
let s : EnvExtensionState := env.extensions.get ext.idx, h
unsafeCast s
else
panic! invalidExtMsg
@@ -362,22 +363,14 @@ opaque EnvExtensionInterfaceImp : EnvExtensionInterface
def EnvExtension (σ : Type) : Type := EnvExtensionInterfaceImp.ext σ
private def ensureExtensionsArraySize (env : Environment) : IO Environment := do
let exts EnvExtensionInterfaceImp.ensureExtensionsSize env.extensions
return { env with extensions := exts }
private def ensureExtensionsArraySize (env : Environment) : IO Environment :=
EnvExtensionInterfaceImp.ensureExtensionsSize env
namespace EnvExtension
instance {σ} [s : Inhabited σ] : Inhabited (EnvExtension σ) := EnvExtensionInterfaceImp.inhabitedExt s
def setState {σ : Type} (ext : EnvExtension σ) (env : Environment) (s : σ) : Environment :=
{ env with extensions := EnvExtensionInterfaceImp.setState ext env.extensions s }
def modifyState {σ : Type} (ext : EnvExtension σ) (env : Environment) (f : σ σ) : Environment :=
{ env with extensions := EnvExtensionInterfaceImp.modifyState ext env.extensions f }
def getState {σ : Type} [Inhabited σ] (ext : EnvExtension σ) (env : Environment) : σ :=
EnvExtensionInterfaceImp.getState ext env.extensions
def setState {σ : Type} (ext : EnvExtension σ) (env : Environment) (s : σ) : Environment := EnvExtensionInterfaceImp.setState ext env s
def modifyState {σ : Type} (ext : EnvExtension σ) (env : Environment) (f : σ σ) : Environment := EnvExtensionInterfaceImp.modifyState ext env f
def getState {σ : Type} [Inhabited σ] (ext : EnvExtension σ) (env : Environment) : σ := EnvExtensionInterfaceImp.getState ext env
end EnvExtension
/-- Environment extensions can only be registered during initialization.
@@ -765,17 +758,7 @@ partial def importModulesCore (imports : Array Import) : ImportStateM Unit := do
moduleNames := s.moduleNames.push i.module
}
/--
Construct environment from `importModulesCore` results.
If `leakEnv` is true, we mark the environment as persistent, which means it
will not be freed. We set this when the object would survive until the end of
the process anyway. In exchange, RC updates are avoided, which is especially
important when they would be atomic because the environment is shared across
threads (potentially, storing it in an `IO.Ref` is sufficient for marking it
as such). -/
def finalizeImport (s : ImportState) (imports : Array Import) (opts : Options) (trustLevel : UInt32 := 0)
(leakEnv := false) : IO Environment := do
def finalizeImport (s : ImportState) (imports : Array Import) (opts : Options) (trustLevel : UInt32 := 0) : IO Environment := do
let numConsts := s.moduleData.foldl (init := 0) fun numConsts mod =>
numConsts + mod.constants.size + mod.extraConstNames.size
let mut const2ModIdx : HashMap Name ModuleIdx := mkHashMap (capacity := numConsts)
@@ -793,7 +776,7 @@ def finalizeImport (s : ImportState) (imports : Array Import) (opts : Options) (
const2ModIdx := const2ModIdx.insert cname modIdx
let constants : ConstMap := SMap.fromHashMap constantMap false
let exts mkInitialExtensionStates
let mut env : Environment := {
let env : Environment := {
const2ModIdx := const2ModIdx
constants := constants
extraConstNames := {}
@@ -807,35 +790,18 @@ def finalizeImport (s : ImportState) (imports : Array Import) (opts : Options) (
moduleData := s.moduleData
}
}
env setImportedEntries env s.moduleData
if leakEnv then
/- Mark persistent a first time before `finalizePersistenExtensions`, which
avoids costly MT markings when e.g. an interpreter closure (which
contains the environment) is put in an `IO.Ref`. This can happen in e.g.
initializers of user environment extensions and is wasteful because the
environment is marked persistent immediately afterwards anyway when the
constructed extension including the closure is ultimately stored in the
initialized constant. We have seen significant savings in `open Mathlib`
timings, where we have both a big environment and interpreted environment
extensions, from this. There is no significant extra cost to calling
`markPersistent` multiple times like this. -/
env := Runtime.markPersistent env
env finalizePersistentExtensions env s.moduleData opts
if leakEnv then
/- Ensure the final environment including environment extension states is
marked persistent as documented. -/
env := Runtime.markPersistent env
let env setImportedEntries env s.moduleData
let env finalizePersistentExtensions env s.moduleData opts
pure env
@[export lean_import_modules]
def importModules (imports : Array Import) (opts : Options) (trustLevel : UInt32 := 0)
(leakEnv := false) : IO Environment := profileitIO "import" opts do
def importModules (imports : Array Import) (opts : Options) (trustLevel : UInt32 := 0) : IO Environment := profileitIO "import" opts do
for imp in imports do
if imp.module matches .anonymous then
throw <| IO.userError "import failed, trying to import module with anonymous name"
withImporting do
let (_, s) importModulesCore imports |>.run
finalizeImport (leakEnv := leakEnv) s imports opts trustLevel
finalizeImport s imports opts trustLevel
/--
Create environment object from imports and free compacted regions after calling `act`. No live references to the

View File

@@ -60,7 +60,7 @@ def bar ⦃x : Nat⦄ : Nat := x
#check bar -- bar : ⦃x : Nat⦄ → Nat
```
See also [the Lean manual](https://lean-lang.org/lean4/doc/expressions.html#implicit-arguments).
See also the Lean manual: https://lean-lang.org/lean4/doc/expressions.html#implicit-arguments
-/
inductive BinderInfo where
/-- Default binder annotation, e.g. `(x : α)` -/
@@ -300,8 +300,8 @@ inductive Expr where
above it (i.e. introduced by a `lam`, `forallE`, or `letE`).
The `deBruijnIndex` parameter is the *de-Bruijn* index for the bound
variable. See [the Wikipedia page on de-Bruijn indices](https://en.wikipedia.org/wiki/De_Bruijn_index)
for additional information.
variable. See [here](https://en.wikipedia.org/wiki/De_Bruijn_index)
for additional information on de-Bruijn indexes.
For example, consider the expression `fun x : Nat => forall y : Nat, x = y`.
The `x` and `y` variables in the equality expression are constructed
@@ -319,11 +319,11 @@ inductive Expr where
| bvar (deBruijnIndex : Nat)
/--
The `fvar` constructor represent free variables. These *free* variable
The `fvar` constructor represent free variables. These /free/ variable
occurrences are not bound by an earlier `lam`, `forallE`, or `letE`
constructor and its binder exists in a local context only.
Note that Lean uses the *locally nameless approach*. See [McBride and McKinna](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.365.2479&rep=rep1&type=pdf)
Note that Lean uses the /locally nameless approach/. See [here](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.365.2479&rep=rep1&type=pdf)
for additional details.
When "visiting" the body of a binding expression (i.e. `lam`, `forallE`, or `letE`),
@@ -361,7 +361,7 @@ inductive Expr where
A function application.
For example, the natural number one, i.e. `Nat.succ Nat.zero` is represented as
``Expr.app (.const `Nat.succ []) (.const .zero [])``.
`Expr.app (.const `Nat.succ []) (.const .zero [])`
Note that multiple arguments are represented using partial application.
For example, the two argument application `f x y` is represented as
@@ -387,15 +387,15 @@ inductive Expr where
For example:
- `forall x : Prop, x ∧ x`:
```lean
Expr.forallE `x (.sort .zero)
(.app (.app (.const `And []) (.bvar 0)) (.bvar 0)) .default
```
```lean
Expr.forallE `x (.sort .zero)
(.app (.app (.const `And []) (.bvar 0)) (.bvar 0)) .default
```
- `Nat → Bool`:
```lean
Expr.forallE `a (.const `Nat [])
(.const `Bool []) .default
```
```lean
Expr.forallE `a (.const `Nat [])
(.const `Bool []) .default
```
-/
| forallE (binderName : Name) (binderType : Expr) (body : Expr) (binderInfo : BinderInfo)
@@ -450,11 +450,11 @@ inductive Expr where
The type of `struct` must be an structure-like inductive type. That is, it has only one
constructor, is not recursive, and it is not an inductive predicate. The kernel and elaborators
check whether the `typeName` matches the type of `struct`, and whether the (zero-based) index
is valid (i.e., it is smaller than the number of constructor fields).
is valid (i.e., it is smaller than the numbef of constructor fields).
When exporting Lean developments to other systems, `proj` can be replaced with `typeName`.`rec`
applications.
Example, given `a : Nat × Bool`, `a.1` is represented as
Example, given `a : Nat x Bool`, `a.1` is represented as
```
.proj `Prod 0 a
```
@@ -774,8 +774,8 @@ instance : BEq Expr where
beq := Expr.eqv
/--
Return `true` iff `a` and `b` are equal.
Binder names and annotations are taken into account.
Return true iff `a` and `b` are equal.
Binder names and annotations are taking into account.
-/
@[extern "lean_expr_equal"]
opaque equal (a : @& Expr) (b : @& Expr) : Bool
@@ -831,7 +831,7 @@ def isConst : Expr → Bool
| _ => false
/--
Return `true` if the given expression is a constant of the given name.
Return `true` if the given expression is a constant of the give name.
Examples:
- `` (.const `Nat []).isConstOf `Nat `` is `true`
- `` (.const `Nat []).isConstOf `False `` is `false`
@@ -1332,14 +1332,6 @@ lambda expression. See docstring for `betaRev` for examples.
def beta (f : Expr) (args : Array Expr) : Expr :=
betaRev f args.reverse
/--
Count the number of lambdas at the head of the given expression.
-/
def getNumHeadLambdas : Expr Nat
| .lam _ _ b _ => getNumHeadLambdas b + 1
| .mdata _ b => getNumHeadLambdas b
| _ => 0
/--
Return true if the given expression is the function of an expression that is target for (head) beta reduction.
If `useZeta = true`, then `let`-expressions are visited. That is, it assumes

View File

@@ -205,9 +205,6 @@ def ofNat : Nat → Level
| 0 => levelZero
| n+1 => mkLevelSucc (ofNat n)
instance instOfNat (n : Nat) : OfNat Level n where
ofNat := ofNat n
def addOffsetAux : Nat Level Level
| 0, u => u
| (n+1), u => addOffsetAux n (mkLevelSucc u)

View File

@@ -10,12 +10,6 @@ def logLint [Monad m] [MonadLog m] [AddMessageContext m] [MonadOptions m]
(linterOption : Lean.Option Bool) (stx : Syntax) (msg : MessageData) : m Unit :=
logWarningAt stx (.tagged linterOption.name m!"{msg} [{linterOption.name}]")
/-- If `linterOption` is true, print a linter warning message at the position determined by `stx`.
-/
def logLintIf [Monad m] [MonadLog m] [AddMessageContext m] [MonadOptions m]
(linterOption : Lean.Option Bool) (stx : Syntax) (msg : MessageData) : m Unit := do
if linterOption.get ( getOptions) then logLint linterOption stx msg
/-- Go upwards through the given `tree` starting from the smallest node that
contains the given `range` and collect all `MacroExpansionInfo`s on the way up.
The result is `some []` if no `MacroExpansionInfo` was found on the way and

View File

@@ -60,8 +60,8 @@ where
-- Drawback: cost.
return e
else match mode with
| .reduce => DiscrTree.reduce e {}
| .reduceSimpleOnly => DiscrTree.reduce e { iota := false, proj := .no }
| .reduce => DiscrTree.reduce e (simpleReduce := false)
| .reduceSimpleOnly => DiscrTree.reduce e (simpleReduce := true)
| .none => return e
lt (a b : Expr) : MetaM Bool := do

View File

@@ -72,11 +72,7 @@ partial def visit (e : Expr) : M Expr := do
end AbstractNestedProofs
/-- Replace proofs nested in `e` with new lemmas. The new lemmas have names of the form `mainDeclName.proof_<idx>` -/
def abstractNestedProofs (mainDeclName : Name) (e : Expr) : MetaM Expr := do
if ( isProof e) then
-- `e` is a proof itself. So, we don't abstract nested proofs
return e
else
AbstractNestedProofs.visit e |>.run { baseName := mainDeclName } |>.run |>.run' { nextIdx := 1 }
def abstractNestedProofs (mainDeclName : Name) (e : Expr) : MetaM Expr :=
AbstractNestedProofs.visit e |>.run { baseName := mainDeclName } |>.run |>.run' { nextIdx := 1 }
end Lean.Meta

View File

@@ -78,6 +78,14 @@ structure Config where
we may want to notify the caller that the TC problem may be solvable
later after it assigns `?m`. -/
isDefEqStuckEx : Bool := false
/--
Controls which definitions and theorems can be unfolded by `isDefEq` and `whnf`.
-/
transparency : TransparencyMode := TransparencyMode.default
/-- If zetaNonDep == false, then non dependent let-decls are not zeta expanded. -/
zetaNonDep : Bool := true
/-- When `trackZeta == true`, we store zetaFVarIds all free variables that have been zeta-expanded. -/
trackZeta : Bool := false
/-- Enable/disable the unification hints feature. -/
unificationHints : Bool := true
/-- Enables proof irrelevance at `isDefEq` -/
@@ -91,24 +99,8 @@ structure Config where
assignSyntheticOpaque : Bool := false
/-- Enable/Disable support for offset constraints such as `?x + 1 =?= e` -/
offsetCnstrs : Bool := true
/--
Controls which definitions and theorems can be unfolded by `isDefEq` and `whnf`.
-/
transparency : TransparencyMode := TransparencyMode.default
/--
When `trackZeta = true`, we track all free variables that have been zeta-expanded.
That is, suppose the local context contains
the declaration `x : t := v`, and we reduce `x` to `v`, then we insert `x` into `State.zetaFVarIds`.
We use `trackZeta` to discover which let-declarations `let x := v; e` can be represented as `(fun x => e) v`.
When we find these declarations we set their `nonDep` flag with `true`.
To find these let-declarations in a given term `s`, we
1- Reset `State.zetaFVarIds`
2- Set `trackZeta := true`
3- Type-check `s`.
-/
trackZeta : Bool := false
/-- Eta for structures configuration mode. -/
etaStruct : EtaStructMode := .all
etaStruct : EtaStructMode := .all
/--
Function parameter information cache.
@@ -374,7 +366,7 @@ section Methods
variable [MonadControlT MetaM n] [Monad n]
@[inline] def modifyCache (f : Cache Cache) : MetaM Unit :=
modify fun { mctx, cache, zetaFVarIds, postponed } => { mctx, cache := f cache, zetaFVarIds, postponed }
modify fun mctx, cache, zetaFVarIds, postponed => mctx, f cache, zetaFVarIds, postponed
@[inline] def modifyInferTypeCache (f : InferTypeCache InferTypeCache) : MetaM Unit :=
modifyCache fun ic, c1, c2, c3, c4, c5, c6 => f ic, c1, c2, c3, c4, c5, c6
@@ -789,9 +781,6 @@ def elimMVarDeps (xs : Array Expr) (e : Expr) (preserveOrder : Bool := false) :
@[inline] def withConfig (f : Config Config) : n α n α :=
mapMetaM <| withReader (fun ctx => { ctx with config := f ctx.config })
/--
Executes `x` tracking zeta reductions `Config.trackZeta := true`
-/
@[inline] def withTrackingZeta (x : n α) : n α :=
withConfig (fun cfg => { cfg with trackZeta := true }) x

View File

@@ -5,7 +5,6 @@ Authors: Leonardo de Moura
-/
import Lean.Meta.KAbstract
import Lean.Meta.Check
import Lean.Meta.AppBuilder
namespace Lean.Meta
@@ -51,17 +50,13 @@ def CasesOnApp.toExpr (c : CasesOnApp) : Expr :=
/--
Given a `casesOn` application `c` of the form
```
casesOn As (fun is x => motive[is, xs]) is major (fun ys_1 => (alt_1 : motive (C_1[ys_1])) ... (fun ys_n => (alt_n : motive (C_n[ys_n]) remaining
casesOn As (fun is x => motive[i, xs]) is major (fun ys_1 => (alt_1 : motive (C_1[ys_1])) ... (fun ys_n => (alt_n : motive (C_n[ys_n]) remaining
```
and an expression `e : B[is, major]`, construct the term
```
casesOn As (fun is x => B[is, x] → motive[i, xs]) is major (fun ys_1 (y : B[_, C_1[ys_1]]) => (alt_1 : motive (C_1[ys_1])) ... (fun ys_n (y : B[_, C_n[ys_n]]) => (alt_n : motive (C_n[ys_n]) e remaining
casesOn As (fun is x => B[is, x] → motive[i, xs]) is major (fun ys_1 (y : B[C_1[ys_1]]) => (alt_1 : motive (C_1[ys_1])) ... (fun ys_n (y : B[C_n[ys_n]]) => (alt_n : motive (C_n[ys_n]) e remaining
```
We use `kabstract` to abstract the `is` and `major` from `B[is, major]`.
This is used in in `Lean.Elab.PreDefinition.WF.Fix` when replacing recursive calls with calls to
the argument provided by `fix` to refine the termination argument, which may mention `major`.
See there for how to use this function.
-/
def CasesOnApp.addArg (c : CasesOnApp) (arg : Expr) (checkIfRefined : Bool := false) : MetaM CasesOnApp := do
lambdaTelescope c.motive fun motiveArgs motiveBody => do
@@ -111,68 +106,11 @@ where
throwError "failed to add argument to `casesOn` application, argument type was not refined by `casesOn`"
return altsNew
/-- Similar to `CasesOnApp.addArg`, but returns `none` on failure. -/
/-- Similar `CasesOnApp.addArg`, but returns `none` on failure. -/
def CasesOnApp.addArg? (c : CasesOnApp) (arg : Expr) (checkIfRefined : Bool := false) : MetaM (Option CasesOnApp) :=
try
return some ( c.addArg arg checkIfRefined)
catch _ =>
return none
/--
Given a `casesOn` application `c` of the form
```
casesOn As (fun is x => motive[is, xs]) is major (fun ys_1 => (alt_1 : motive (C_1[ys_1])) ... (fun ys_n => (alt_n : motive (C_n[ys_n]) remaining
```
and an expression `B[is, major]` (which may not be a type, e.g. `n : Nat`)
for every alternative `i`, construct the expression `fun ys_i => B[_, C_i[ys_i]]`
This is similar to `CasesOnApp.addArg` when you only have an expression to
refined, and not a type with a value.
This is used in in `Lean.Elab.PreDefinition.WF.GuessFix` when constructing the context of recursive
calls to refine the functions' paramter, which may mention `major`.
See there for how to use this function.
-/
def CasesOnApp.refineThrough (c : CasesOnApp) (e : Expr) : MetaM (Array Expr) :=
lambdaTelescope c.motive fun motiveArgs _motiveBody => do
unless motiveArgs.size == c.indices.size + 1 do
throwError "failed to transfer argument through `casesOn` application, motive must be lambda expression with #{c.indices.size + 1} binders"
let discrs := c.indices ++ #[c.major]
let mut eAbst := e
for motiveArg in motiveArgs.reverse, discr in discrs.reverse do
eAbst kabstract eAbst discr
eAbst := eAbst.instantiate1 motiveArg
-- Let's create something thats a `Sort` and mentions `e`
-- (recall that `e` itself possibly isn't a type),
-- by writing `e = e`, so that we can use it as a motive
let eEq mkEq eAbst eAbst
let motive mkLambdaFVars motiveArgs eEq
let us := if c.propOnly then c.us else levelZero :: c.us.tail!
-- Now instantiate the casesOn wth this synthetic motive
let aux := mkAppN (mkConst c.declName us) c.params
let aux := mkApp aux motive
let aux := mkAppN aux discrs
check aux
let auxType inferType aux
-- The type of the remaining arguments will mention `e` instantiated for each arg
-- so extract them
forallTelescope auxType fun altAuxs _ => do
let altAuxTys altAuxs.mapM (inferType ·)
(Array.zip c.altNumParams altAuxTys).mapM fun (altNumParams, altAuxTy) => do
forallBoundedTelescope altAuxTy altNumParams fun fvs body => do
unless fvs.size = altNumParams do
throwError "failed to transfer argument through `casesOn` application, alt type must be telescope with #{altNumParams} arguments"
-- extract type from our synthetic equality
let body := body.getArg! 2
-- and abstract over the parameters of the alternatives, so that we can safely pass the Expr out
mkLambdaFVars fvs body
/-- A non-failing version of `CasesOnApp.refineThrough` -/
def CasesOnApp.refineThrough? (c : CasesOnApp) (e : Expr) :
MetaM (Option (Array Expr)) :=
try
return some ( c.refineThrough e)
catch _ =>
return none
end Lean.Meta

View File

@@ -28,12 +28,12 @@ partial def collectMVars (e : Expr) : StateRefT CollectMVars.State MetaM Unit :=
| none => pure ()
| some d => collectMVars (mkMVar d.mvarIdPending)
/-- Return metavariables occurring in the given expression. See `collectMVars` -/
/-- Return metavariables in occurring the given expression. See `collectMVars` -/
def getMVars (e : Expr) : MetaM (Array MVarId) := do
let (_, s) (collectMVars e).run {}
pure s.result
/-- Similar to `getMVars`, but removes delayed assignments. -/
/-- Similar to getMVars, but removes delayed assignments. -/
def getMVarsNoDelayed (e : Expr) : MetaM (Array MVarId) := do
let mvarIds getMVars e
mvarIds.filterM fun mvarId => not <$> mvarId.isDelayedAssigned

View File

@@ -30,7 +30,7 @@ namespace Lean.Meta.DiscrTree
Recall that projections from classes are **NOT** reducible.
For example, the expressions `Add.add α (ringAdd ?α ?s) ?x ?x`
and `Add.add Nat Nat.hasAdd a b` generates paths with the following keys
respectively
respctively
```
⟨Add.add, 4⟩, *, *, *, *
⟨Add.add, 4⟩, *, *, ⟨a,0⟩, ⟨b,0⟩
@@ -48,27 +48,33 @@ namespace Lean.Meta.DiscrTree
2- Distinguish partial applications `f a`, `f a b`, and `f a b c`.
-/
def Key.ctorIdx : Key Nat
| .star => 0
| .other => 1
| .lit .. => 2
| .fvar .. => 3
| .const .. => 4
| .arrow => 5
| .proj .. => 6
/--
Auxiliary function used to implement `Key.lt`.
Note that the `DiscrTree` implementation assumes that the `.star` key is the smallest one, and `.ground` key is the biggest.
-/
def Key.ctorIdx : Key s Nat
| .star => 0
| .other => 1
| .lit .. => 2
| .fvar .. => 3
| .const .. => 4
| .arrow => 5
| .proj .. => 6
| .ground .. => 7
def Key.lt : Key Key Bool
def Key.lt : Key s Key s Bool
| .lit v₁, .lit v₂ => v₁ < v₂
| .fvar n₁ a₁, .fvar n₂ a₂ => Name.quickLt n₁.name n₂.name || (n₁ == n₂ && a₁ < a₂)
| .const n₁ a₁, .const n₂ a₂ => Name.quickLt n₁ n₂ || (n₁ == n₂ && a₁ < a₂)
| .proj s₁ i₁ a₁, .proj s₂ i₂ a₂ => Name.quickLt s₁ s₂ || (s₁ == s₂ && i₁ < i₂) || (s₁ == s₂ && i₁ == i₂ && a₁ < a₂)
| k₁, k₂ => k₁.ctorIdx < k₂.ctorIdx
instance : LT Key := fun a b => Key.lt a b
instance (a b : Key) : Decidable (a < b) := inferInstanceAs (Decidable (Key.lt a b))
instance : LT (Key s) := fun a b => Key.lt a b
instance (a b : Key s) : Decidable (a < b) := inferInstanceAs (Decidable (Key.lt a b))
def Key.format : Key Format
def Key.format : Key s Format
| .star => "*"
| .ground => "*g"
| .other => ""
| .lit (Literal.natVal v) => Std.format v
| .lit (Literal.strVal v) => repr v
@@ -77,41 +83,41 @@ def Key.format : Key → Format
| .fvar k _ => Std.format k.name
| .arrow => ""
instance : ToFormat Key := Key.format
instance : ToFormat (Key s) := Key.format
def Key.arity : Key Nat
def Key.arity : (Key s) Nat
| .const _ a => a
| .fvar _ a => a
| .arrow => 2
| .proj _ _ a => 1 + a
| _ => 0
instance : Inhabited (Trie α) := .node #[] #[]
instance : Inhabited (Trie α s) := .node #[] #[]
def empty : DiscrTree α := { root := {} }
def empty : DiscrTree α s := { root := {} }
partial def Trie.format [ToFormat α] : Trie α Format
partial def Trie.format [ToFormat α] : Trie α s Format
| .node vs cs => Format.group $ Format.paren $
"node" ++ (if vs.isEmpty then Format.nil else " " ++ Std.format vs)
++ Format.join (cs.toList.map fun k, c => Format.line ++ Format.paren (Std.format k ++ " => " ++ format c))
instance [ToFormat α] : ToFormat (Trie α) := Trie.format
instance [ToFormat α] : ToFormat (Trie α s) := Trie.format
partial def format [ToFormat α] (d : DiscrTree α) : Format :=
partial def format [ToFormat α] (d : DiscrTree α s) : Format :=
let (_, r) := d.root.foldl
(fun (p : Bool × Format) k c =>
(false, p.2 ++ (if p.1 then Format.nil else Format.line) ++ Format.paren (Std.format k ++ " => " ++ Std.format c)))
(true, Format.nil)
Format.group r
instance [ToFormat α] : ToFormat (DiscrTree α) := format
instance [ToFormat α] : ToFormat (DiscrTree α s) := format
/-- The discrimination tree ignores implicit arguments and proofs.
We use the following auxiliary id as a "mark". -/
private def tmpMVarId : MVarId := { name := `_discr_tree_tmp }
private def tmpStar := mkMVar tmpMVarId
instance : Inhabited (DiscrTree α) where
instance : Inhabited (DiscrTree α s) where
default := {}
/--
@@ -247,18 +253,24 @@ def mkNoindexAnnotation (e : Expr) : Expr :=
def hasNoindexAnnotation (e : Expr) : Bool :=
annotation? `noindex e |>.isSome
def mkGroundAnnotation (e : Expr) : Expr :=
mkAnnotation `ground e
def hasGroundAnnotation (e : Expr) : Bool :=
annotation? `ground e |>.isSome
/--
Reduction procedure for the discrimination tree indexing.
The parameter `config` controls how aggressively the term is reduced.
The parameter `simpleReduce` controls how aggressive the term is reduced.
The parameter at type `DiscrTree` controls this value.
See comment at `DiscrTree`.
-/
partial def reduce (e : Expr) (config : WhnfCoreConfig) : MetaM Expr := do
let e whnfCore e config
partial def reduce (e : Expr) (simpleReduce : Bool) : MetaM Expr := do
let e whnfCore e (simpleReduceOnly := simpleReduce)
match ( unfoldDefinition? e) with
| some e => reduce e config
| some e => reduce e simpleReduce
| none => match e.etaExpandedStrict? with
| some e => reduce e config
| some e => reduce e simpleReduce
| none => return e
/--
@@ -307,31 +319,35 @@ private def elimLooseBVarsByBeta (e : Expr) : CoreM Expr :=
Reduce `e` until we get an irreducible term (modulo current reducibility setting) or the resulting term
is a bad key (see comment at `isBadKey`).
We use this method instead of `reduce` for root terms at `pushArgs`. -/
private partial def reduceUntilBadKey (e : Expr) (config : WhnfCoreConfig) : MetaM Expr := do
private partial def reduceUntilBadKey (e : Expr) (simpleReduce : Bool) : MetaM Expr := do
let e step e
match e.etaExpandedStrict? with
| some e => reduceUntilBadKey e config
| some e => reduceUntilBadKey e simpleReduce
| none => return e
where
step (e : Expr) := do
let e whnfCore e config
let e whnfCore e (simpleReduceOnly := simpleReduce)
match ( unfoldDefinition? e) with
| some e' => if isBadKey e'.getAppFn then return e else step e'
| none => return e
/-- whnf for the discrimination tree module -/
def reduceDT (e : Expr) (root : Bool) (config : WhnfCoreConfig) : MetaM Expr :=
if root then reduceUntilBadKey e config else reduce e config
def reduceDT (e : Expr) (root : Bool) (simpleReduce : Bool) : MetaM Expr :=
if root then reduceUntilBadKey e simpleReduce else reduce e simpleReduce
/- Remark: we use `shouldAddAsStar` only for nested terms, and `root == false` for nested terms -/
private def pushArgs (root : Bool) (todo : Array Expr) (e : Expr) (config : WhnfCoreConfig) : MetaM (Key × Array Expr) := do
private def pushArgs (root : Bool) (todo : Array Expr) (e : Expr) : MetaM (Key s × Array Expr) := do
if hasNoindexAnnotation e then
return (.star, todo)
else if hasGroundAnnotation e then
if root then
throwError "`ground` pattern modifier cannot be use in root terms{indentExpr e}"
return (.ground, todo)
else
let e reduceDT e root config
let e reduceDT e root (simpleReduce := s)
let fn := e.getAppFn
let push (k : Key) (nargs : Nat) (todo : Array Expr): MetaM (Key × Array Expr) := do
let push (k : Key s) (nargs : Nat) (todo : Array Expr): MetaM (Key s × Array Expr) := do
let info getFunInfoNArgs fn nargs
let todo pushArgsAux info.paramInfo (nargs-1) e todo
return (k, todo)
@@ -377,24 +393,24 @@ private def pushArgs (root : Bool) (todo : Array Expr) (e : Expr) (config : Whnf
| _ =>
return (.other, todo)
partial def mkPathAux (root : Bool) (todo : Array Expr) (keys : Array Key) (config : WhnfCoreConfig) : MetaM (Array Key) := do
partial def mkPathAux (root : Bool) (todo : Array Expr) (keys : Array (Key s)) : MetaM (Array (Key s)) := do
if todo.isEmpty then
return keys
else
let e := todo.back
let todo := todo.pop
let (k, todo) pushArgs root todo e config
mkPathAux false todo (keys.push k) config
let (k, todo) pushArgs root todo e
mkPathAux false todo (keys.push k)
private def initCapacity := 8
def mkPath (e : Expr) (config : WhnfCoreConfig) : MetaM (Array Key) := do
def mkPath (e : Expr) : MetaM (Array (Key s)) := do
withReducible do
let todo : Array Expr := .mkEmpty initCapacity
let keys : Array Key := .mkEmpty initCapacity
mkPathAux (root := true) (todo.push e) keys config
let keys : Array (Key s) := .mkEmpty initCapacity
mkPathAux (root := true) (todo.push e) keys
private partial def createNodes (keys : Array Key) (v : α) (i : Nat) : Trie α :=
private partial def createNodes (keys : Array (Key s)) (v : α) (i : Nat) : Trie α s :=
if h : i < keys.size then
let k := keys.get i, h
let c := createNodes keys v (i+1)
@@ -421,20 +437,20 @@ where
vs.push v
termination_by loop i => vs.size - i
private partial def insertAux [BEq α] (keys : Array Key) (v : α) (config : WhnfCoreConfig) : Nat Trie α Trie α
private partial def insertAux [BEq α] (keys : Array (Key s)) (v : α) : Nat Trie α s Trie α s
| i, .node vs cs =>
if h : i < keys.size then
let k := keys.get i, h
let c := Id.run $ cs.binInsertM
(fun a b => a.1 < b.1)
(fun _, s => let c := insertAux keys v config (i+1) s; (k, c)) -- merge with existing
(fun _, s => let c := insertAux keys v (i+1) s; (k, c)) -- merge with existing
(fun _ => let c := createNodes keys v (i+1); (k, c))
(k, default)
.node vs c
else
.node (insertVal vs v) cs
def insertCore [BEq α] (d : DiscrTree α) (keys : Array Key) (v : α) (config : WhnfCoreConfig) : DiscrTree α :=
def insertCore [BEq α] (d : DiscrTree α s) (keys : Array (Key s)) (v : α) : DiscrTree α s :=
if keys.isEmpty then panic! "invalid key sequence"
else
let k := keys[0]!
@@ -443,15 +459,15 @@ def insertCore [BEq α] (d : DiscrTree α) (keys : Array Key) (v : α) (config :
let c := createNodes keys v 1
{ root := d.root.insert k c }
| some c =>
let c := insertAux keys v config 1 c
let c := insertAux keys v 1 c
{ root := d.root.insert k c }
def insert [BEq α] (d : DiscrTree α) (e : Expr) (v : α) (config : WhnfCoreConfig) : MetaM (DiscrTree α) := do
let keys mkPath e config
return d.insertCore keys v config
def insert [BEq α] (d : DiscrTree α s) (e : Expr) (v : α) : MetaM (DiscrTree α s) := do
let keys mkPath e
return d.insertCore keys v
private def getKeyArgs (e : Expr) (isMatch root : Bool) (config : WhnfCoreConfig) : MetaM (Key × Array Expr) := do
let e reduceDT e root config
private def getKeyArgs (e : Expr) (isMatch root : Bool) : MetaM (Key s × Array Expr) := do
let e reduceDT e root (simpleReduce := s)
unless root do
-- See pushArgs
if let some v := toNatLit? e then
@@ -530,22 +546,22 @@ private def getKeyArgs (e : Expr) (isMatch root : Bool) (config : WhnfCoreConfig
| _ =>
return (.other, #[])
private abbrev getMatchKeyArgs (e : Expr) (root : Bool) (config : WhnfCoreConfig) : MetaM (Key × Array Expr) :=
getKeyArgs e (isMatch := true) (root := root) (config := config)
private abbrev getMatchKeyArgs (e : Expr) (root : Bool) : MetaM (Key s × Array Expr) :=
getKeyArgs e (isMatch := true) (root := root)
private abbrev getUnifyKeyArgs (e : Expr) (root : Bool) (config : WhnfCoreConfig) : MetaM (Key × Array Expr) :=
getKeyArgs e (isMatch := false) (root := root) (config := config)
private abbrev getUnifyKeyArgs (e : Expr) (root : Bool) : MetaM (Key s × Array Expr) :=
getKeyArgs e (isMatch := false) (root := root)
private def getStarResult (d : DiscrTree α) : Array α :=
private def getStarResult (d : DiscrTree α s) : Array α :=
let result : Array α := .mkEmpty initCapacity
match d.root.find? .star with
| none => result
| none => result
| some (.node vs _) => result ++ vs
private abbrev findKey (cs : Array (Key × Trie α)) (k : Key) : Option (Key × Trie α) :=
private abbrev findKey (cs : Array (Key s × Trie α s)) (k : Key s) : Option (Key s × Trie α s) :=
cs.binSearch (k, default) (fun a b => a.1 < b.1)
private partial def getMatchLoop (todo : Array Expr) (c : Trie α) (result : Array α) (config : WhnfCoreConfig) : MetaM (Array α) := do
private partial def getMatchLoop (todo : Array Expr) (c : Trie α s) (result : Array α) : MetaM (Array α) := do
match c with
| .node vs cs =>
if todo.isEmpty then
@@ -555,21 +571,32 @@ private partial def getMatchLoop (todo : Array Expr) (c : Trie α) (result : Arr
else
let e := todo.back
let todo := todo.pop
let first := cs[0]! /- Recall that `Key.star` is the minimal key -/
let (k, args) getMatchKeyArgs e (root := false) config
let (k, args) getMatchKeyArgs e (root := false)
/- We must always visit `Key.star` edges since they are wildcards.
Thus, `todo` is not used linearly when there is `Key.star` edge
and there is an edge for `k` and `k != Key.star`. -/
let visitStar (result : Array α) : MetaM (Array α) :=
let first := cs[0]! /- Recall that `Key.star` is the minimal key -/
if first.1 == .star then
getMatchLoop todo first.2 result config
getMatchLoop todo first.2 result
else
return result
let visitNonStar (k : Key) (args : Array Expr) (result : Array α) : MetaM (Array α) :=
let visitGround (result : Array α) : MetaM (Array α) := do
let last := cs.back /- Recall that `Key.ground` is the maximal key -/
if last.1 == .ground then
let e instantiateMVars e /- TODO(Leo): check whether this is perf bottleneck. -/
if e.hasFVar || e.hasExprMVar then
return result
else
getMatchLoop todo last.2 result
else
return result
let visitNonStar (k : Key s) (args : Array Expr) (result : Array α) : MetaM (Array α) :=
match findKey cs k with
| none => return result
| some c => getMatchLoop (todo ++ args) c.2 result config
| some c => getMatchLoop (todo ++ args) c.2 result
let result visitStar result
let result visitGround result
match k with
| .star => return result
/-
@@ -580,32 +607,32 @@ private partial def getMatchLoop (todo : Array Expr) (c : Trie α) (result : Arr
| .arrow => visitNonStar .other #[] ( visitNonStar k args result)
| _ => visitNonStar k args result
private def getMatchRoot (d : DiscrTree α) (k : Key) (args : Array Expr) (result : Array α) (config : WhnfCoreConfig) : MetaM (Array α) :=
private def getMatchRoot (d : DiscrTree α s) (k : Key s) (args : Array Expr) (result : Array α) : MetaM (Array α) :=
match d.root.find? k with
| none => return result
| some c => getMatchLoop args c result config
| some c => getMatchLoop args c result
private def getMatchCore (d : DiscrTree α) (e : Expr) (config : WhnfCoreConfig) : MetaM (Key × Array α) :=
private def getMatchCore (d : DiscrTree α s) (e : Expr) : MetaM (Key s × Array α) :=
withReducible do
let result := getStarResult d
let (k, args) getMatchKeyArgs e (root := true) config
let (k, args) getMatchKeyArgs e (root := true)
match k with
| .star => return (k, result)
/- See note about "dep-arrow vs arrow" at `getMatchLoop` -/
| .arrow => return (k, ( getMatchRoot d k args ( getMatchRoot d .other #[] result config) config))
| _ => return (k, ( getMatchRoot d k args result config))
| .arrow => return (k, ( getMatchRoot d k args ( getMatchRoot d .other #[] result)))
| _ => return (k, ( getMatchRoot d k args result))
/--
Find values that match `e` in `d`.
-/
def getMatch (d : DiscrTree α) (e : Expr) (config : WhnfCoreConfig) : MetaM (Array α) :=
return ( getMatchCore d e config).2
def getMatch (d : DiscrTree α s) (e : Expr) : MetaM (Array α) :=
return ( getMatchCore d e).2
/--
Similar to `getMatch`, but returns solutions that are prefixes of `e`.
We store the number of ignored arguments in the result.-/
partial def getMatchWithExtra (d : DiscrTree α) (e : Expr) (config : WhnfCoreConfig) : MetaM (Array (α × Nat)) := do
let (k, result) getMatchCore d e config
partial def getMatchWithExtra (d : DiscrTree α s) (e : Expr) : MetaM (Array (α × Nat)) := do
let (k, result) getMatchCore d e
let result := result.map (·, 0)
if !e.isApp then
return result
@@ -614,8 +641,8 @@ partial def getMatchWithExtra (d : DiscrTree α) (e : Expr) (config : WhnfCoreCo
else
go e.appFn! 1 result
where
mayMatchPrefix (k : Key) : MetaM Bool :=
let cont (k : Key) : MetaM Bool :=
mayMatchPrefix (k : Key s) : MetaM Bool :=
let cont (k : Key s) : MetaM Bool :=
if d.root.find? k |>.isSome then
return true
else
@@ -627,15 +654,15 @@ where
| _ => return false
go (e : Expr) (numExtra : Nat) (result : Array (α × Nat)) : MetaM (Array (α × Nat)) := do
let result := result ++ ( getMatchCore d e config).2.map (., numExtra)
let result := result ++ ( getMatch d e).map (., numExtra)
if e.isApp then
go e.appFn! (numExtra + 1) result
else
return result
partial def getUnify (d : DiscrTree α) (e : Expr) (config : WhnfCoreConfig) : MetaM (Array α) :=
partial def getUnify (d : DiscrTree α s) (e : Expr) : MetaM (Array α) :=
withReducible do
let (k, args) getUnifyKeyArgs e (root := true) config
let (k, args) getUnifyKeyArgs e (root := true)
match k with
| .star => d.root.foldlM (init := #[]) fun result k c => process k.arity #[] c result
| _ =>
@@ -644,7 +671,7 @@ partial def getUnify (d : DiscrTree α) (e : Expr) (config : WhnfCoreConfig) : M
| none => return result
| some c => process 0 args c result
where
process (skip : Nat) (todo : Array Expr) (c : Trie α) (result : Array α) : MetaM (Array α) := do
process (skip : Nat) (todo : Array Expr) (c : Trie α s) (result : Array α) : MetaM (Array α) := do
match skip, c with
| skip+1, .node _ cs =>
if cs.isEmpty then
@@ -659,21 +686,33 @@ where
else
let e := todo.back
let todo := todo.pop
let (k, args) getUnifyKeyArgs e (root := false) config
let (k, args) getUnifyKeyArgs e (root := false)
let visitStar (result : Array α) : MetaM (Array α) :=
let first := cs[0]!
if first.1 == .star then
process 0 todo first.2 result
else
return result
let visitNonStar (k : Key) (args : Array Expr) (result : Array α) : MetaM (Array α) :=
let visitGround (result : Array α) : MetaM (Array α) := do
let last := cs.back
if last.1 == .ground then
let e instantiateMVars e /- TODO(Leo): check whether this is perf bottleneck. -/
if e.hasFVar || e.hasExprMVar then
return result
else
process 0 todo last.2 result
else
return result
let visitStarAndGround (result : Array α) : MetaM (Array α) := do
visitGround ( visitStar result)
let visitNonStar (k : Key s) (args : Array Expr) (result : Array α) : MetaM (Array α) :=
match findKey cs k with
| none => return result
| some c => process 0 (todo ++ args) c.2 result
match k with
| .star => cs.foldlM (init := result) fun result k, c => process k.arity todo c result
-- See comment a `getMatch` regarding non-dependent arrows vs dependent arrows
| .arrow => visitNonStar .other #[] ( visitNonStar k args ( visitStar result))
| _ => visitNonStar k args ( visitStar result)
| .arrow => visitNonStar .other #[] ( visitNonStar k args ( visitStarAndGround result))
| _ => visitNonStar k args ( visitStarAndGround result)
end Lean.Meta.DiscrTree

View File

@@ -11,52 +11,84 @@ namespace Lean.Meta
namespace DiscrTree
/--
Discrimination tree key. See `DiscrTree`
(Imperfect) Discrimination tree key. See `DiscrTree`.
The parameter `simpleReduce` controls how aggressive the term is reduced.
-/
inductive Key where
| const : Name Nat Key
| fvar : FVarId Nat Key
| lit : Literal Key
| star : Key
| other : Key
| arrow : Key
| proj : Name Nat Nat Key
inductive Key (simpleReduce : Bool) where
/-- A constant application with name `declName` and `arity` arguments. -/
| const (declName : Name) (arity : Nat)
/--
Free variables (and arity). Thus, an entry in the discrimination tree
may reference hypotheses from the local context.
-/
| fvar (fvarId : FVarId) (arity : Nat)
/-- Literal. -/
| lit (litVal : Literal)
/--
Star or wildcard. We use them to represent metavariables and terms
we want to ignore. We ignore implicit arguments and proofs.
-/
| star
/--
Other terms. We use to represent other kinds of terms
(e.g., nested lambda, forall, sort, etc).
-/
| other
/-- Arrow (aka non dependent arrows). -/
| arrow
/-- Projection (applications). -/
| proj (structName : Name) (projIdx : Nat) (arity : Nat)
/--
Ground terms.
We use to implement the `ground p` pattern annotation.
When the `DiscrTree` is trying to match a term `e` with key the `.ground`,
it succeeds if `e` does not contain free or meta variables.
Note that, in the pattern `ground p`, the term `p` is ignored.
We can also view `ground` as a variant of `star` that matches all ground terms.
-/
| ground : Key simpleReduce
deriving Inhabited, BEq, Repr
protected def Key.hash : Key UInt64
| .const n a => mixHash 5237 $ mixHash (hash n) (hash a)
| .fvar n a => mixHash 3541 $ mixHash (hash n) (hash a)
| .lit v => mixHash 1879 $ hash v
| .star => 7883
| .other => 2411
| .arrow => 17
| .proj s i a => mixHash (hash a) $ mixHash (hash s) (hash i)
protected def Key.hash : Key s UInt64
| Key.const n a => mixHash 5237 $ mixHash (hash n) (hash a)
| Key.fvar n a => mixHash 3541 $ mixHash (hash n) (hash a)
| Key.lit v => mixHash 1879 $ hash v
| Key.star => 7883
| Key.other => 2411
| Key.arrow => 17
| Key.ground => 11
| Key.proj s i a => mixHash (hash a) $ mixHash (hash s) (hash i)
instance : Hashable Key := Key.hash
instance : Hashable (Key s) := Key.hash
/--
Discrimination tree trie. See `DiscrTree`.
-/
inductive Trie (α : Type) where
| node (vs : Array α) (children : Array (Key × Trie α)) : Trie α
inductive Trie (α : Type) (simpleReduce : Bool) where
| node (vs : Array α) (children : Array (Key simpleReduce × Trie α simpleReduce)) : Trie α simpleReduce
end DiscrTree
open DiscrTree
/-!
Notes regarding term reduction at the `DiscrTree` module.
/--
Discrimination trees. It is an index from terms to values of type `α`.
If `simpleReduce := true`, then only simple reduction are performed while
indexing/retrieving terms. For example, `iota` reduction is not performed.
We use `simpleReduce := false` in the type class resolution module,
and `simpleReduce := true` in `simp`.
Motivations:
- In `simp`, we want to have `simp` theorem such as
```
@[simp] theorem liftOn_mk (a : α) (f : αγ) (h : ∀ a₁ a₂, r a₁ a₂ → f a₁ = f a₂) :
Quot.liftOn (Quot.mk r a) f h = f a := rfl
```
If we enable `iota`, then the lhs is reduced to `f a`.
Note that when retrieving terms, we may also disable `beta` and `zeta` reduction.
See issue https://github.com/leanprover/lean4/issues/2669
- During type class resolution, we often want to reduce types using even `iota` and projection reduction.
- During type class resolution, we often want to reduce types using even `iota`.
Example:
```
inductive Ty where
@@ -75,11 +107,7 @@ def f (a b : Ty.bool.interp) : Ty.bool.interp :=
test (.==.) a b
```
-/
/--
Discrimination trees. It is an index from terms to values of type `α`.
-/
structure DiscrTree (α : Type) where
root : PersistentHashMap Key (Trie α) := {}
structure DiscrTree (α : Type) (simpleReduce : Bool) where
root : PersistentHashMap (Key simpleReduce) (Trie α simpleReduce) := {}
end Lean.Meta

View File

@@ -1538,9 +1538,9 @@ private partial def isDefEqQuickOther (t s : Expr) : MetaM LBool := do
However, pattern annotations (`inaccessible?` and `patternWithRef?`) must be consumed.
The frontend relies on the fact that is must not be propagated by `isDefEq`.
Thus, we consume it here. This is a bit hackish since it is very adhoc.
We might have other annotations in the future that we should not preserve.
Perhaps, we should mark the annotations we do want to preserve
(e.g., hints for the pretty printer), and consume all others.
We might other annotations in the future that we should not preserve.
Perhaps, we should mark the annotation we do want to preserve ones
(e.g., hints for the pretty printer), and consume all other
-/
if let some t := patternAnnotation? t then
isDefEqQuick t s
@@ -1567,7 +1567,7 @@ private partial def isDefEqQuickOther (t s : Expr) : MetaM LBool := do
isDefEqQuick t s
/- Remark: we do not eagerly synthesize synthetic metavariables when the constraint is not stuck.
Reason: we may fail to solve a constraint of the form `?x =?= A` when the synthesized instance
is not definitionally equal to `A`. We left the code here as a reminder of this issue. -/
is not definitionally equal to `A`. We left the code here as a remainder of this issue. -/
-- else if (← isSynthetic tFn <&&> trySynthPending tFn) then
-- let t ← instantiateMVars t
-- isDefEqQuick t s
@@ -1597,7 +1597,7 @@ private partial def isDefEqQuickOther (t s : Expr) : MetaM LBool := do
1- The elaborator has a pending list of things to do: Tactics, TC, etc.
2- The elaborator only tries tactics after it tried to solve pending TC problems, delayed elaboratio, etc.
The motivation: avoid unassigned metavariables in goals.
3- Each pending tactic goal is represented as a metavariable. It is marked as `syntheticOpaque` to make it clear
3- Each pending tactic goal is represented as a metavariable. It is marked as `synthethicOpaque` to make it clear
that it should not be assigned by unification.
4- When we abstract a term containing metavariables, we often create new metavariables.
Example: when abstracting `x` at `f ?m`, we obtain `fun x => f (?m' x)`. If `x` is in the scope of `?m`.
@@ -1692,14 +1692,14 @@ where
isDefEqSingleton (structName : Name) (s : Expr) (v : Expr) : MetaM Bool := do
if isClass ( getEnv) structName then
/-
We disable this feature if `structName` is a class. See issue #2011.
We disable this feature is `structName` is a class. See issue #2011.
The example at issue #2011, the following weird
instance was being generated for `Zero (f x)`
```
(@Zero.mk (f x✝) ((@instZero I (fun i => f i) fun i => inst✝¹ i).1 x✝)
```
where `inst✝¹` is the local instance `[∀ i, Zero (f i)]`
Note that this instance is definitionally equal to the expected nicer
Note that this instance is definitinally equal to the expected nicer
instance `inst✝¹ x✝`.
However, the nasty instance trigger nasty unification higher order
constraints later.
@@ -1733,7 +1733,7 @@ private def isDefEqApp (t s : Expr) : MetaM Bool := do
let tFn := t.getAppFn
let sFn := s.getAppFn
if tFn.isConst && sFn.isConst && tFn.constName! == sFn.constName! then
/- See comment at `tryHeuristic` explaining why we process arguments before universe levels. -/
/- See comment at `tryHeuristic` explaining why we processe arguments before universe levels. -/
if ( checkpointDefEq (isDefEqArgs tFn t.getAppArgs s.getAppArgs <&&> isListLevelDefEqAux tFn.constLevels! sFn.constLevels!)) then
return true
else
@@ -1743,7 +1743,7 @@ private def isDefEqApp (t s : Expr) : MetaM Bool := do
else
isDefEqOnFailure t s
/-- Return `true` if the type of the given expression is an inductive datatype with a single constructor with no fields. -/
/-- Return `true` if the types of the given expressions is an inductive datatype with an inductive datatype with a single constructor with no fields. -/
private def isDefEqUnitLike (t : Expr) (s : Expr) : MetaM Bool := do
let tType whnf ( inferType t)
matchConstStruct tType.getAppFn (fun _ => return false) fun _ _ ctorVal => do

View File

@@ -37,7 +37,7 @@ def f (a b : Ty.bool.interp) : Ty.bool.interp :=
See comment at `DiscrTree`.
-/
abbrev InstanceKey := DiscrTree.Key
abbrev InstanceKey := DiscrTree.Key (simpleReduce := false)
structure InstanceEntry where
keys : Array InstanceKey
@@ -63,7 +63,7 @@ instance : ToFormat InstanceEntry where
| some n => format n
| _ => "<local>"
abbrev InstanceTree := DiscrTree InstanceEntry
abbrev InstanceTree := DiscrTree InstanceEntry (simpleReduce := false)
structure Instances where
discrTree : InstanceTree := DiscrTree.empty
@@ -71,13 +71,10 @@ structure Instances where
erased : PHashSet Name := {}
deriving Inhabited
/-- Configuration for the discrimination tree module -/
def tcDtConfig : WhnfCoreConfig := {}
def addInstanceEntry (d : Instances) (e : InstanceEntry) : Instances :=
match e.globalName? with
| some n => { d with discrTree := d.discrTree.insertCore e.keys e tcDtConfig, instanceNames := d.instanceNames.insert n e, erased := d.erased.erase n }
| none => { d with discrTree := d.discrTree.insertCore e.keys e tcDtConfig }
| some n => { d with discrTree := d.discrTree.insertCore e.keys e, instanceNames := d.instanceNames.insert n e, erased := d.erased.erase n }
| none => { d with discrTree := d.discrTree.insertCore e.keys e }
def Instances.eraseCore (d : Instances) (declName : Name) : Instances :=
{ d with erased := d.erased.insert declName, instanceNames := d.instanceNames.erase declName }
@@ -97,7 +94,7 @@ private def mkInstanceKey (e : Expr) : MetaM (Array InstanceKey) := do
let type inferType e
withNewMCtxDepth do
let (_, _, type) forallMetaTelescopeReducing type
DiscrTree.mkPath type tcDtConfig
DiscrTree.mkPath type
/--
Compute the order the arguments of `inst` should by synthesized.
@@ -210,7 +207,7 @@ builtin_initialize
modifyEnv fun env => instanceExtension.modifyState env fun _ => s
}
def getGlobalInstancesIndex : CoreM (DiscrTree InstanceEntry) :=
def getGlobalInstancesIndex : CoreM (DiscrTree InstanceEntry (simpleReduce := false)) :=
return Meta.instanceExtension.getState ( getEnv) |>.discrTree
def getErasedInstances : CoreM (PHashSet Name) :=

View File

@@ -910,17 +910,11 @@ private partial def updateAlts (typeNew : Expr) (altNumParams : Array Nat) (alts
- matcherApp `match_i As (fun xs => motive[xs]) discrs (fun ys_1 => (alt_1 : motive (C_1[ys_1])) ... (fun ys_n => (alt_n : motive (C_n[ys_n]) remaining`, and
- expression `e : B[discrs]`,
Construct the term
`match_i As (fun xs => B[xs] -> motive[xs]) discrs (fun ys_1 (y : B[C_1[ys_1]]) => alt_1) ... (fun ys_n (y : B[C_n[ys_n]]) => alt_n) e remaining`.
`match_i As (fun xs => B[xs] -> motive[xs]) discrs (fun ys_1 (y : B[C_1[ys_1]]) => alt_1) ... (fun ys_n (y : B[C_n[ys_n]]) => alt_n) e remaining`, and
We use `kabstract` to abstract the discriminants from `B[discrs]`.
This method assumes
- the `matcherApp.motive` is a lambda abstraction where `xs.size == discrs.size`
- each alternative is a lambda abstraction where `ys_i.size == matcherApp.altNumParams[i]`
This is used in in `Lean.Elab.PreDefinition.WF.Fix` when replacing recursive calls with calls to
the argument provided by `fix` to refine the termination argument, which may mention `major`.
See there for how to use this function.
-/
def MatcherApp.addArg (matcherApp : MatcherApp) (e : Expr) : MetaM MatcherApp :=
lambdaTelescope matcherApp.motive fun motiveArgs motiveBody => do
@@ -957,76 +951,13 @@ def MatcherApp.addArg (matcherApp : MatcherApp) (e : Expr) : MetaM MatcherApp :=
remaining := #[e] ++ matcherApp.remaining
}
/-- Similar to `MatcherApp.addArg`, but returns `none` on failure. -/
/-- Similar `MatcherApp.addArg?`, but returns `none` on failure. -/
def MatcherApp.addArg? (matcherApp : MatcherApp) (e : Expr) : MetaM (Option MatcherApp) :=
try
return some ( matcherApp.addArg e)
catch _ =>
return none
/-- Given
- matcherApp `match_i As (fun xs => motive[xs]) discrs (fun ys_1 => (alt_1 : motive (C_1[ys_1])) ... (fun ys_n => (alt_n : motive (C_n[ys_n]) remaining`, and
- a expression `B[discrs]` (which may not be a type, e.g. `n : Nat`),
returns the expressions `fun ys_1 ... ys_i => B[C_1[ys_1]] ... B[C_n[ys_n]]`,
This method assumes
- the `matcherApp.motive` is a lambda abstraction where `xs.size == discrs.size`
- each alternative is a lambda abstraction where `ys_i.size == matcherApp.altNumParams[i]`
This is similar to `MatcherApp.addArg` when you only have an expression to
refined, and not a type with a value.
This is used in in `Lean.Elab.PreDefinition.WF.GuessFix` when constructing the context of recursive
calls to refine the functions' paramter, which may mention `major`.
See there for how to use this function.
-/
def MatcherApp.refineThrough (matcherApp : MatcherApp) (e : Expr) : MetaM (Array Expr) :=
lambdaTelescope matcherApp.motive fun motiveArgs _motiveBody => do
unless motiveArgs.size == matcherApp.discrs.size do
-- This error can only happen if someone implemented a transformation that rewrites the motive created by `mkMatcher`.
throwError "failed to transfer argument through matcher application, motive must be lambda expression with #{matcherApp.discrs.size} arguments"
let eAbst matcherApp.discrs.size.foldRevM (init := e) fun i eAbst => do
let motiveArg := motiveArgs[i]!
let discr := matcherApp.discrs[i]!
let eTypeAbst kabstract eAbst discr
return eTypeAbst.instantiate1 motiveArg
-- Let's create something thats a `Sort` and mentions `e`
-- (recall that `e` itself possibly isn't a type),
-- by writing `e = e`, so that we can use it as a motive
let eEq mkEq eAbst eAbst
let matcherLevels match matcherApp.uElimPos? with
| none => pure matcherApp.matcherLevels
| some pos =>
pure <| matcherApp.matcherLevels.set! pos levelZero
let motive mkLambdaFVars motiveArgs eEq
let aux := mkAppN (mkConst matcherApp.matcherName matcherLevels.toList) matcherApp.params
let aux := mkApp aux motive
let aux := mkAppN aux matcherApp.discrs
unless ( isTypeCorrect aux) do
throwError "failed to transfer argument through matcher application, type error when constructing the new motive"
let auxType inferType aux
forallTelescope auxType fun altAuxs _ => do
let altAuxTys altAuxs.mapM (inferType ·)
(Array.zip matcherApp.altNumParams altAuxTys).mapM fun (altNumParams, altAuxTy) => do
forallBoundedTelescope altAuxTy altNumParams fun fvs body => do
unless fvs.size = altNumParams do
throwError "failed to transfer argument through matcher application, alt type must be telescope with #{altNumParams} arguments"
-- extract type from our synthetic equality
let body := body.getArg! 2
-- and abstract over the parameters of the alternatives, so that we can safely pass the Expr out
mkLambdaFVars fvs body
/-- A non-failing version of `MatcherApp.refineThrough` -/
def MatcherApp.refineThrough? (matcherApp : MatcherApp) (e : Expr) :
MetaM (Option (Array Expr)) :=
try
return some ( matcherApp.refineThrough e)
catch _ =>
return none
builtin_initialize
registerTraceClass `Meta.Match.match
registerTraceClass `Meta.Match.debug

View File

@@ -195,7 +195,7 @@ def getInstances (type : Expr) : MetaM (Array Instance) := do
| none => throwError "type class instance expected{indentExpr type}"
| some className =>
let globalInstances getGlobalInstancesIndex
let result globalInstances.getUnify type tcDtConfig
let result globalInstances.getUnify type
-- Using insertion sort because it is stable and the array `result` should be mostly sorted.
-- Most instances have default priority.
let result := result.insertionSort fun e₁ e₂ => e₁.priority < e₂.priority
@@ -582,16 +582,13 @@ def main (type : Expr) (maxResultSize : Nat) : MetaM (Option AbstractMVarsResult
let action : SynthM (Option AbstractMVarsResult) := do
newSubgoal ( getMCtx) key mvar Waiter.root
synth
-- TODO: it would be nice to have a nice notation for the following idiom
withCatchingRuntimeEx
try
withoutCatchingRuntimeEx do
action.run { maxResultSize := maxResultSize, maxHeartbeats := getMaxHeartbeats ( getOptions) } |>.run' {}
catch ex =>
if ex.isRuntime then
throwError "failed to synthesize{indentExpr type}\n{ex.toMessageData}"
else
throw ex
try
action.run { maxResultSize := maxResultSize, maxHeartbeats := getMaxHeartbeats ( getOptions) } |>.run' {}
catch ex =>
if ex.isMaxHeartbeat then
throwError "failed to synthesize{indentExpr type}\n{ex.toMessageData}"
else
throw ex
end SynthInstance

View File

@@ -8,7 +8,7 @@ import Lean.Meta.Tactic.Clear
namespace Lean.Meta
private partial def cleanupCore (mvarId : MVarId) (toPreserve : Array FVarId) (indirectProps : Bool) : MetaM MVarId := do
private partial def cleanupCore (mvarId : MVarId) : MetaM MVarId := do
mvarId.withContext do
mvarId.checkNotAssigned `cleanup
let used collectUsed |>.run' (false, {})
@@ -53,23 +53,18 @@ where
collectUsed : StateRefT (Bool × FVarIdSet) MetaM FVarIdSet := do
addUsedFVars ( instantiateMVars ( mvarId.getType))
toPreserve.forM addUsedFVar
if indirectProps then collectProps
collectProps
return ( get).2
/--
Auxiliary tactic for cleaning the local context. It removes local declarations (aka hypotheses) that are *not* relevant.
We say a variable `x` is "relevant" if
- It occurs in the `toPreserve` array, or
- It occurs in the target type, or
- There is a relevant variable `y` that depends on `x`, or
- If `indirectProps` is true, the type of `x` is a proposition and it depends on a relevant variable `y`.
By default, `toPreserve := #[]` and `indirectProps := true`. These settings are used in the mathlib tactic `extract_goal`
to give the user more control over which variables to include.
- The type of `x` is a proposition and it depends on a relevant variable `y`.
-/
abbrev _root_.Lean.MVarId.cleanup (mvarId : MVarId) (toPreserve : Array FVarId := #[]) (indirectProps : Bool := true) : MetaM MVarId := do
cleanupCore mvarId toPreserve indirectProps
abbrev _root_.Lean.MVarId.cleanup (mvarId : MVarId) : MetaM MVarId := do
cleanupCore mvarId
@[deprecated MVarId.cleanup]
abbrev cleanup (mvarId : MVarId) : MetaM MVarId := do

View File

@@ -62,12 +62,9 @@ private def replaceLocalDeclCore (mvarId : MVarId) (fvarId : FVarId) (typeNew :
mvarId.withContext do
let localDecl fvarId.getDecl
let typeNewPr mkEqMP eqProof (mkFVar fvarId)
/- `typeNew` may contain variables that occur after `fvarId`.
Thus, we use the auxiliary function `findMaxFVar` to ensure `typeNew` is well-formed at the
position we are inserting it.
We must `instantiateMVars` first to ensure that there is no mvar in `typeNew` which is
assigned to some later-occurring fvar. -/
let (_, localDecl') findMaxFVar ( instantiateMVars typeNew) |>.run localDecl
-- `typeNew` may contain variables that occur after `fvarId`.
-- Thus, we use the auxiliary function `findMaxFVar` to ensure `typeNew` is well-formed at the position we are inserting it.
let (_, localDecl') findMaxFVar typeNew |>.run localDecl
let result mvarId.assertAfter localDecl'.fvarId localDecl.userName typeNew typeNewPr
(do let mvarIdNew result.mvarId.clear fvarId
pure { result with mvarId := mvarIdNew })
@@ -84,19 +81,11 @@ where
/--
Replace type of the local declaration with id `fvarId` with one with the same user-facing name, but with type `typeNew`.
This method assumes `eqProof` is a proof that the type of `fvarId` is equal to `typeNew`.
This tactic actually adds a new declaration and (tries to) clear the old one.
This method assumes `eqProof` is a proof that the type of `fvarId` is equal to `typeNew`.
This tactic actually adds a new declaration and (tries to) clear the old one.
If the old one cannot be cleared, then at least its user-facing name becomes inaccessible.
The new local declaration is inserted at the soonest point after `fvarId` at which it is
well-formed. That is, if `typeNew` involves declarations which occur later than `fvarId` in the
local context, the new local declaration will be inserted immediately after the latest-occurring
one. Otherwise, it will be inserted immediately after `fvarId`.
Note: `replaceLocalDecl` should not be used when unassigned pending mvars might be present in
`typeNew`, as these may later be synthesized to fvars which occur after `fvarId` (by e.g.
`Term.withSynthesize` or `Term.synthesizeSyntheticMVars`).
-/
Remark: the new declaration is added immediately after `fvarId`.
`typeNew` must be well-formed at `fvarId`, but `eqProof` may contain variables declared after `fvarId`. -/
abbrev _root_.Lean.MVarId.replaceLocalDecl (mvarId : MVarId) (fvarId : FVarId) (typeNew : Expr) (eqProof : Expr) : MetaM AssertAfterResult :=
replaceLocalDeclCore mvarId fvarId typeNew eqProof

Some files were not shown because too many files have changed in this diff Show More