mirror of
https://github.com/leanprover/lean4.git
synced 2026-03-17 18:34:06 +00:00
Compare commits
172 Commits
hbv/port_s
...
3c32607020
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3c32607020 | ||
|
|
6714601ee4 | ||
|
|
6b604625f2 | ||
|
|
e96b0ff39c | ||
|
|
50ee6dff0a | ||
|
|
9e0aa14b6f | ||
|
|
5c685465bd | ||
|
|
ef87f6b9ac | ||
|
|
49715fe63c | ||
|
|
133fd016b4 | ||
|
|
76e593a52d | ||
|
|
fa9a32b5c8 | ||
|
|
2d999d7622 | ||
|
|
ddd5c213c6 | ||
|
|
c9ceba1784 | ||
|
|
57df23f27e | ||
|
|
ea8fca2d9f | ||
|
|
274997420a | ||
|
|
6631352136 | ||
|
|
cfa8c5a036 | ||
|
|
7120d9aef5 | ||
|
|
c2d4079193 | ||
|
|
47b3be0524 | ||
|
|
de2b177423 | ||
|
|
a32173e6f6 | ||
|
|
e6d9220eee | ||
|
|
aae827cb4c | ||
|
|
47833725ea | ||
|
|
24acf2b895 | ||
|
|
d9ebd51c04 | ||
|
|
6a2a884372 | ||
|
|
4740e044c8 | ||
|
|
4deb8d5b50 | ||
|
|
d3db4368d4 | ||
|
|
652ca9f5b7 | ||
|
|
a32be44f90 | ||
|
|
e43b526363 | ||
|
|
734566088f | ||
|
|
17807e1cbe | ||
|
|
4450ff8995 | ||
|
|
9fac847f5f | ||
|
|
7acf5710c4 | ||
|
|
220a242f65 | ||
|
|
ff6816a854 | ||
|
|
cd85b93d93 | ||
|
|
bb047b8725 | ||
|
|
2ea4d016c4 | ||
|
|
b626c6d326 | ||
|
|
ebfc34466b | ||
|
|
49ed556479 | ||
|
|
e9060e7a4e | ||
|
|
0ebc126718 | ||
|
|
daddac1797 | ||
|
|
04f676ec64 | ||
|
|
9b1973ada7 | ||
|
|
85d38cba84 | ||
|
|
e5e7dcc00f | ||
|
|
ce6a07c4d9 | ||
|
|
320ddae700 | ||
|
|
ada53633dc | ||
|
|
e01cbf2b8f | ||
|
|
71ff366211 | ||
|
|
670360681f | ||
|
|
079db91c8c | ||
|
|
007e082b1c | ||
|
|
cdfde63734 | ||
|
|
2e06fb5008 | ||
|
|
37f10435a9 | ||
|
|
a4dd66df62 | ||
|
|
40e8f4c5fb | ||
|
|
63098493b3 | ||
|
|
fe3ba4dc4c | ||
|
|
e9e46f4199 | ||
|
|
e2b500b204 | ||
|
|
e804829101 | ||
|
|
27b583d304 | ||
|
|
d8accf47b3 | ||
|
|
530842e843 | ||
|
|
9c852d2f8c | ||
|
|
c948d24b6d | ||
|
|
c1bcc4d1ac | ||
|
|
a3cb39eac9 | ||
|
|
54f188160c | ||
|
|
68ea28c24f | ||
|
|
35944c367b | ||
|
|
5f3ca3ac3d | ||
|
|
ee293de982 | ||
|
|
a165292462 | ||
|
|
db6aa9d8d3 | ||
|
|
6ebe573c19 | ||
|
|
f059a1ebd3 | ||
|
|
a34777a08d | ||
|
|
fe1ad52f88 | ||
|
|
8d42ad4796 | ||
|
|
333ab1c6f0 | ||
|
|
4384344465 | ||
|
|
3cfa2dac42 | ||
|
|
e044ffae6a | ||
|
|
09f8cfc539 | ||
|
|
5191b30b20 | ||
|
|
10ece4e082 | ||
|
|
8526edb1fc | ||
|
|
caad260789 | ||
|
|
2f3d0ee6ad | ||
|
|
eacb82e5f3 | ||
|
|
e78ba3bd85 | ||
|
|
551086c854 | ||
|
|
36f05c4a18 | ||
|
|
41cb6dac1d | ||
|
|
47b7c7e65e | ||
|
|
cbee80d92c | ||
|
|
1c60b40261 | ||
|
|
0fb289c470 | ||
|
|
59711e5cff | ||
|
|
f3752861c9 | ||
|
|
d03499322d | ||
|
|
66bc9ae177 | ||
|
|
0f7fb1ea4d | ||
|
|
530925c69b | ||
|
|
73640d3758 | ||
|
|
e14f2c8c93 | ||
|
|
df61abb08f | ||
|
|
dc63bb0b70 | ||
|
|
7ca47aad7d | ||
|
|
1f04bf4fd1 | ||
|
|
03a5db34c7 | ||
|
|
f4bbf748df | ||
|
|
46fe37290e | ||
|
|
dd710dd1bd | ||
|
|
9a841125e7 | ||
|
|
2daaa50afb | ||
|
|
145a121048 | ||
|
|
584d92d302 | ||
|
|
d66aaebca6 | ||
|
|
4ac7ea4aab | ||
|
|
6bebf9c529 | ||
|
|
df74c80973 | ||
|
|
292b423a17 | ||
|
|
cda84702e9 | ||
|
|
ec565f3bf7 | ||
|
|
feea8a7611 | ||
|
|
6d305096e5 | ||
|
|
235b0eb987 | ||
|
|
5dd8d570fd | ||
|
|
3ea59e15b8 | ||
|
|
d59f229b74 | ||
|
|
a364595111 | ||
|
|
08ab8bf7c3 | ||
|
|
54df5173d2 | ||
|
|
36ffba4b57 | ||
|
|
2e9e5db408 | ||
|
|
81a5eb55d5 | ||
|
|
b4f768b67f | ||
|
|
9843794e3f | ||
|
|
9bd4dfb696 | ||
|
|
b1db0d2798 | ||
|
|
4cd7a85334 | ||
|
|
6cf1c4a1be | ||
|
|
e7aa785822 | ||
|
|
668f07039c | ||
|
|
005f6ae7cd | ||
|
|
738688efee | ||
|
|
adf3e5e661 | ||
|
|
38682c4d4a | ||
|
|
f2438a1830 | ||
|
|
48c37f6588 | ||
|
|
8273df0d0b | ||
|
|
f83a8b4cd5 | ||
|
|
fedfc22c53 | ||
|
|
a91fb93eee | ||
|
|
b3b4867d6c | ||
|
|
1e4894b431 |
@@ -20,9 +20,24 @@ CTEST_PARALLEL_LEVEL="$(nproc)" CTEST_OUTPUT_ON_FAILURE=1 \
|
||||
make -C build/release -j "$(nproc)" test ARGS='--rerun-failed'
|
||||
|
||||
# Single test from tests/foo/bar/ (quick check during development)
|
||||
cd tests/foo/bar && ./run_test example_test.lean
|
||||
CTEST_PARALLEL_LEVEL="$(nproc)" CTEST_OUTPUT_ON_FAILURE=1 \
|
||||
make -C build/release -j "$(nproc)" test ARGS=-R testname'
|
||||
```
|
||||
|
||||
## Testing stage 2
|
||||
|
||||
When requested to test stage 2, build it as follows:
|
||||
```
|
||||
make -C build/release stage2 -j$(nproc)
|
||||
```
|
||||
Stage 2 is *not* automatically invalidated by changes to `src/` which allows for faster iteration
|
||||
when fixing a specific file in the stage 2 build but for invalidating any files that already passed
|
||||
the stage 2 build as well as for final validation,
|
||||
```
|
||||
make -C build/release/stage2 clean-stdlib
|
||||
```
|
||||
must be run manually before building.
|
||||
|
||||
## New features
|
||||
|
||||
When asked to implement new features:
|
||||
@@ -40,6 +55,10 @@ When asked to implement new features:
|
||||
- ONLY use the project's documented build command: `make -j$(nproc) -C build/release`
|
||||
- If a build is broken, ask the user before attempting any manual cleanup
|
||||
|
||||
## stage0 Is a Copy of src
|
||||
|
||||
**Never manually edit files under `stage0/`.** The `stage0/` directory is a snapshot of `src/` produced by `make update-stage0`. To change anything in stage0 (CMakeLists.txt, C++ source, etc.), edit the corresponding file in `src/` and let `update-stage0` propagate it.
|
||||
|
||||
## LSP and IDE Diagnostics
|
||||
|
||||
After rebuilding, LSP diagnostics may be stale until the user interacts with files. Trust command-line test results over IDE diagnostics.
|
||||
|
||||
@@ -121,6 +121,42 @@ The nightly build system uses branches and tags across two repositories:
|
||||
|
||||
When a nightly succeeds with mathlib, all three should point to the same commit. Don't confuse these: branches are in the main lean4 repo, dated tags are in lean4-nightly.
|
||||
|
||||
## CI Failures: Investigate Immediately
|
||||
|
||||
**CRITICAL: If the checklist reports `❌ CI: X check(s) failing` for any PR, investigate immediately.**
|
||||
|
||||
Do NOT:
|
||||
- Report it as "CI in progress" or "some checks pending"
|
||||
- Wait for the remaining checks to finish before investigating
|
||||
- Assume it's a transient failure without checking
|
||||
|
||||
DO:
|
||||
1. Run `gh pr checks <number> --repo <owner>/<repo>` to see which specific check failed
|
||||
2. Run `gh run view <run-id> --repo <owner>/<repo> --log-failed` to see the failure output
|
||||
3. Diagnose the failure and report clearly to the user: what failed and why
|
||||
4. Propose a fix if one is obvious (e.g., subverso version mismatch, transient elan install error)
|
||||
|
||||
The checklist now distinguishes `❌ X check(s) failing, Y still in progress` from `🔄 Y check(s) in progress`.
|
||||
Any `❌` in CI status requires immediate investigation — do not move on.
|
||||
|
||||
## Waiting for CI or Merges
|
||||
|
||||
Use `gh pr checks --watch` to block until a PR's CI checks complete (no polling needed).
|
||||
Run these as background bash commands so you get notified when they finish:
|
||||
|
||||
```bash
|
||||
# Watch CI, then check merge state
|
||||
gh pr checks <number> --repo <owner>/<repo> --watch && gh pr view <number> --repo <owner>/<repo> --json state --jq '.state'
|
||||
```
|
||||
|
||||
For multiple PRs, launch one background command per PR in parallel. When each completes,
|
||||
you'll be notified automatically via a task-notification. Do NOT use sleep-based polling
|
||||
loops — `--watch` is event-driven and exits as soon as checks finish.
|
||||
|
||||
Note: `gh pr checks --watch` exits as soon as ALL checks complete (pass or fail). If some checks
|
||||
fail while others are still running, `--watch` will continue until everything settles, then exit
|
||||
with a non-zero code. So a background `--watch` finishing = all checks done; check which failed.
|
||||
|
||||
## Error Handling
|
||||
|
||||
**CRITICAL**: If something goes wrong or a command fails:
|
||||
|
||||
26
.claude/skills/profiling/SKILL.md
Normal file
26
.claude/skills/profiling/SKILL.md
Normal file
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: profiling
|
||||
description: Profile Lean programs with demangled names using samply and Firefox Profiler. Use when the user asks to profile a Lean binary or investigate performance.
|
||||
allowed-tools: Bash, Read, Glob, Grep
|
||||
---
|
||||
|
||||
# Profiling Lean Programs
|
||||
|
||||
Full documentation: `script/PROFILER_README.md`.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
script/lean_profile.sh ./build/release/stage1/bin/lean some_file.lean
|
||||
```
|
||||
|
||||
Requires `samply` (`cargo install samply`) and `python3`.
|
||||
|
||||
## Agent Notes
|
||||
|
||||
- The pipeline is interactive (serves to browser at the end). When running non-interactively, run the steps manually instead of using the wrapper script.
|
||||
- The three steps are: `samply record --save-only`, `symbolicate_profile.py`, then `serve_profile.py`.
|
||||
- `lean_demangle.py` works standalone as a stdin filter (like `c++filt`) for quick name lookups.
|
||||
- The `--raw` flag on `lean_demangle.py` gives exact demangled names without postprocessing (keeps `._redArg`, `._lam_0` suffixes as-is).
|
||||
- Use `PROFILE_KEEP=1` to keep the temp directory for later inspection.
|
||||
- The demangled profile is a standard Firefox Profiler JSON. Function names live in `threads[i].stringArray`, indexed by `threads[i].funcTable.name`.
|
||||
6
.gitattributes
vendored
6
.gitattributes
vendored
@@ -5,9 +5,3 @@ stage0/** binary linguist-generated
|
||||
# The following file is often manually edited, so do show it in diffs
|
||||
stage0/src/stdlib_flags.h -binary -linguist-generated
|
||||
doc/std/grove/GroveStdlib/Generated/** linguist-generated
|
||||
# These files should not have line endings translated on Windows, because
|
||||
# it throws off parser tests. Later lines override earlier ones, so the
|
||||
# runner code is still treated as ordinary text.
|
||||
tests/lean/docparse/* eol=lf
|
||||
tests/lean/docparse/*.lean eol=auto
|
||||
tests/lean/docparse/*.sh eol=auto
|
||||
|
||||
9
.github/workflows/awaiting-manual.yml
vendored
9
.github/workflows/awaiting-manual.yml
vendored
@@ -2,16 +2,19 @@ name: Check awaiting-manual label
|
||||
|
||||
on:
|
||||
merge_group:
|
||||
pull_request:
|
||||
pull_request_target:
|
||||
types: [opened, synchronize, reopened, labeled, unlabeled]
|
||||
|
||||
permissions:
|
||||
pull-requests: read
|
||||
|
||||
jobs:
|
||||
check-awaiting-manual:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check awaiting-manual label
|
||||
id: check-awaiting-manual-label
|
||||
if: github.event_name == 'pull_request'
|
||||
if: github.event_name == 'pull_request_target'
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
script: |
|
||||
@@ -28,7 +31,7 @@ jobs:
|
||||
}
|
||||
|
||||
- name: Wait for manual compatibility
|
||||
if: github.event_name == 'pull_request' && steps.check-awaiting-manual-label.outputs.awaiting == 'true'
|
||||
if: github.event_name == 'pull_request_target' && steps.check-awaiting-manual-label.outputs.awaiting == 'true'
|
||||
run: |
|
||||
echo "::notice title=Awaiting manual::PR is marked 'awaiting-manual' but neither 'breaks-manual' nor 'builds-manual' labels are present."
|
||||
echo "This check will remain in progress until the PR is updated with appropriate manual compatibility labels."
|
||||
|
||||
9
.github/workflows/awaiting-mathlib.yml
vendored
9
.github/workflows/awaiting-mathlib.yml
vendored
@@ -2,16 +2,19 @@ name: Check awaiting-mathlib label
|
||||
|
||||
on:
|
||||
merge_group:
|
||||
pull_request:
|
||||
pull_request_target:
|
||||
types: [opened, synchronize, reopened, labeled, unlabeled]
|
||||
|
||||
permissions:
|
||||
pull-requests: read
|
||||
|
||||
jobs:
|
||||
check-awaiting-mathlib:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check awaiting-mathlib label
|
||||
id: check-awaiting-mathlib-label
|
||||
if: github.event_name == 'pull_request'
|
||||
if: github.event_name == 'pull_request_target'
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
script: |
|
||||
@@ -28,7 +31,7 @@ jobs:
|
||||
}
|
||||
|
||||
- name: Wait for mathlib compatibility
|
||||
if: github.event_name == 'pull_request' && steps.check-awaiting-mathlib-label.outputs.awaiting == 'true'
|
||||
if: github.event_name == 'pull_request_target' && steps.check-awaiting-mathlib-label.outputs.awaiting == 'true'
|
||||
run: |
|
||||
echo "::notice title=Awaiting mathlib::PR is marked 'awaiting-mathlib' but neither 'breaks-mathlib' nor 'builds-mathlib' labels are present."
|
||||
echo "This check will remain in progress until the PR is updated with appropriate mathlib compatibility labels."
|
||||
|
||||
16
.github/workflows/build-template.yml
vendored
16
.github/workflows/build-template.yml
vendored
@@ -66,16 +66,10 @@ jobs:
|
||||
brew install ccache tree zstd coreutils gmp libuv
|
||||
if: runner.os == 'macOS'
|
||||
- name: Checkout
|
||||
if: (!endsWith(matrix.os, '-with-cache'))
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
# the default is to use a virtual merge commit between the PR and master: just use the PR
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: Namespace Checkout
|
||||
if: endsWith(matrix.os, '-with-cache')
|
||||
uses: namespacelabs/nscloud-checkout-action@v8
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: Open Nix shell once
|
||||
run: true
|
||||
if: runner.os == 'Linux'
|
||||
@@ -240,14 +234,16 @@ jobs:
|
||||
- name: Build Stage 2
|
||||
run: |
|
||||
make -C build -j$NPROC stage2
|
||||
if: matrix.test-speedcenter
|
||||
if: matrix.test-bench
|
||||
- name: Check Stage 3
|
||||
run: |
|
||||
make -C build -j$NPROC check-stage3
|
||||
if: matrix.check-stage3
|
||||
- name: Test Speedcenter Benchmarks
|
||||
run: nix shell github:Kha/lakeprof -c make -C build -j$NPROC bench
|
||||
if: matrix.test-speedcenter
|
||||
- name: Test Benchmarks
|
||||
run: |
|
||||
cd tests
|
||||
nix develop -c make -C ../build -j$NPROC bench
|
||||
if: matrix.test-bench
|
||||
- name: Check rebootstrap
|
||||
run: |
|
||||
set -e
|
||||
|
||||
5
.github/workflows/check-stdlib-flags.yml
vendored
5
.github/workflows/check-stdlib-flags.yml
vendored
@@ -1,9 +1,12 @@
|
||||
name: Check stdlib_flags.h modifications
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
pull_request_target:
|
||||
types: [opened, synchronize, reopened, labeled, unlabeled]
|
||||
|
||||
permissions:
|
||||
pull-requests: read
|
||||
|
||||
jobs:
|
||||
check-stdlib-flags:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
25
.github/workflows/ci.yml
vendored
25
.github/workflows/ci.yml
vendored
@@ -166,7 +166,7 @@ jobs:
|
||||
# 0: PRs without special label
|
||||
# 1: PRs with `merge-ci` label, merge queue checks, master commits
|
||||
# 2: nightlies
|
||||
# 3: PRs with `release-ci` label, full releases
|
||||
# 3: PRs with `release-ci` or `lake-ci` label, full releases
|
||||
- name: Set check level
|
||||
id: set-level
|
||||
# We do not use github.event.pull_request.labels.*.name here because
|
||||
@@ -175,6 +175,7 @@ jobs:
|
||||
run: |
|
||||
check_level=0
|
||||
fast=false
|
||||
lake_ci=false
|
||||
|
||||
if [[ -n "${{ steps.set-release.outputs.RELEASE_TAG }}" || -n "${{ steps.set-release-custom.outputs.RELEASE_TAG }}" ]]; then
|
||||
check_level=3
|
||||
@@ -189,13 +190,19 @@ jobs:
|
||||
elif echo "$labels" | grep -q "merge-ci"; then
|
||||
check_level=1
|
||||
fi
|
||||
if echo "$labels" | grep -q "lake-ci"; then
|
||||
lake_ci=true
|
||||
fi
|
||||
if echo "$labels" | grep -q "fast-ci"; then
|
||||
fast=true
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "check-level=$check_level" >> "$GITHUB_OUTPUT"
|
||||
echo "fast=$fast" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "check-level=$check_level"
|
||||
echo "fast=$fast"
|
||||
echo "lake-ci=$lake_ci"
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
@@ -206,6 +213,7 @@ jobs:
|
||||
script: |
|
||||
const level = ${{ steps.set-level.outputs.check-level }};
|
||||
const fast = ${{ steps.set-level.outputs.fast }};
|
||||
const lakeCi = "${{ steps.set-level.outputs.lake-ci }}" == "true";
|
||||
console.log(`level: ${level}, fast: ${fast}`);
|
||||
// use large runners where available (original repo)
|
||||
let large = ${{ github.repository == 'leanprover/lean4' }};
|
||||
@@ -258,8 +266,8 @@ jobs:
|
||||
"check-rebootstrap": level >= 1,
|
||||
"check-stage3": level >= 2,
|
||||
"test": true,
|
||||
// NOTE: `test-speedcenter` currently seems to be broken on `ubuntu-latest`
|
||||
"test-speedcenter": large && level >= 2,
|
||||
// NOTE: `test-bench` currently seems to be broken on `ubuntu-latest`
|
||||
"test-bench": large && level >= 2,
|
||||
// We are not warning-free yet on all platforms, start here
|
||||
"CMAKE_OPTIONS": "-DLEAN_EXTRA_CXX_FLAGS=-Werror",
|
||||
},
|
||||
@@ -269,6 +277,8 @@ jobs:
|
||||
"enabled": level >= 2,
|
||||
"test": true,
|
||||
"CMAKE_PRESET": "reldebug",
|
||||
// * `elab_bench/big_do` crashes with exit code 134
|
||||
"CTEST_OPTIONS": "-E 'elab_bench/big_do'",
|
||||
},
|
||||
{
|
||||
"name": "Linux fsanitize",
|
||||
@@ -377,6 +387,11 @@ jobs:
|
||||
job["CMAKE_OPTIONS"] = (job["CMAKE_OPTIONS"] ? job["CMAKE_OPTIONS"] + " " : "") + "-DUSE_LAKE=OFF";
|
||||
}
|
||||
}
|
||||
if (lakeCi) {
|
||||
for (const job of matrix) {
|
||||
job["CMAKE_OPTIONS"] = (job["CMAKE_OPTIONS"] ? job["CMAKE_OPTIONS"] + " " : "") + "-DLAKE_CI=ON";
|
||||
}
|
||||
}
|
||||
console.log(`matrix:\n${JSON.stringify(matrix, null, 2)}`);
|
||||
matrix = matrix.filter((job) => job["enabled"]);
|
||||
core.setOutput('matrix', matrix.filter((job) => !job["secondary"]));
|
||||
|
||||
8
.github/workflows/labels-from-comments.yml
vendored
8
.github/workflows/labels-from-comments.yml
vendored
@@ -1,5 +1,5 @@
|
||||
# This workflow allows any user to add one of the `awaiting-review`, `awaiting-author`, `WIP`,
|
||||
# `release-ci`, or a `changelog-XXX` label by commenting on the PR or issue.
|
||||
# `release-ci`, `lake-ci`, or a `changelog-XXX` label by commenting on the PR or issue.
|
||||
# If any labels from the set {`awaiting-review`, `awaiting-author`, `WIP`} are added, other labels
|
||||
# from that set are removed automatically at the same time.
|
||||
# Similarly, if any `changelog-XXX` label is added, other `changelog-YYY` labels are removed.
|
||||
@@ -12,7 +12,7 @@ on:
|
||||
|
||||
jobs:
|
||||
update-label:
|
||||
if: github.event.issue.pull_request != null && (contains(github.event.comment.body, 'awaiting-review') || contains(github.event.comment.body, 'awaiting-author') || contains(github.event.comment.body, 'WIP') || contains(github.event.comment.body, 'release-ci') || contains(github.event.comment.body, 'changelog-'))
|
||||
if: github.event.issue.pull_request != null && (contains(github.event.comment.body, 'awaiting-review') || contains(github.event.comment.body, 'awaiting-author') || contains(github.event.comment.body, 'WIP') || contains(github.event.comment.body, 'release-ci') || contains(github.event.comment.body, 'lake-ci') || contains(github.event.comment.body, 'changelog-'))
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
@@ -28,6 +28,7 @@ jobs:
|
||||
const awaitingAuthor = commentLines.includes('awaiting-author');
|
||||
const wip = commentLines.includes('WIP');
|
||||
const releaseCI = commentLines.includes('release-ci');
|
||||
const lakeCI = commentLines.includes('lake-ci');
|
||||
const changelogMatch = commentLines.find(line => line.startsWith('changelog-'));
|
||||
|
||||
if (awaitingReview || awaitingAuthor || wip) {
|
||||
@@ -49,6 +50,9 @@ jobs:
|
||||
if (releaseCI) {
|
||||
await github.rest.issues.addLabels({ owner, repo, issue_number, labels: ['release-ci'] });
|
||||
}
|
||||
if (lakeCI) {
|
||||
await github.rest.issues.addLabels({ owner, repo, issue_number, labels: ['lake-ci'] });
|
||||
}
|
||||
|
||||
if (changelogMatch) {
|
||||
const changelogLabel = changelogMatch.trim();
|
||||
|
||||
10
.github/workflows/pr-body.yml
vendored
10
.github/workflows/pr-body.yml
vendored
@@ -2,17 +2,23 @@ name: Check PR body for changelog convention
|
||||
|
||||
on:
|
||||
merge_group:
|
||||
pull_request:
|
||||
pull_request_target:
|
||||
types: [opened, synchronize, reopened, edited, labeled, converted_to_draft, ready_for_review]
|
||||
|
||||
permissions:
|
||||
pull-requests: read
|
||||
|
||||
jobs:
|
||||
check-pr-body:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check PR body
|
||||
if: github.event_name == 'pull_request'
|
||||
if: github.event_name == 'pull_request_target'
|
||||
uses: actions/github-script@v8
|
||||
with:
|
||||
# Safety note: this uses pull_request_target, so the workflow has elevated privileges.
|
||||
# The PR title and body are only used in regex tests (read-only string matching),
|
||||
# never interpolated into shell commands, eval'd, or written to GITHUB_ENV/GITHUB_OUTPUT.
|
||||
script: |
|
||||
const { title, body, labels, draft } = context.payload.pull_request;
|
||||
if (!draft && /^(feat|fix):/.test(title) && !labels.some(label => label.name == "changelog-no")) {
|
||||
|
||||
2
.github/workflows/restart-on-label.yml
vendored
2
.github/workflows/restart-on-label.yml
vendored
@@ -7,7 +7,7 @@ on:
|
||||
jobs:
|
||||
restart-on-label:
|
||||
runs-on: ubuntu-latest
|
||||
if: contains(github.event.label.name, 'merge-ci') || contains(github.event.label.name, 'release-ci')
|
||||
if: contains(github.event.label.name, 'merge-ci') || contains(github.event.label.name, 'release-ci') || contains(github.event.label.name, 'lake-ci')
|
||||
steps:
|
||||
- run: |
|
||||
# Finding latest CI workflow run on current pull request
|
||||
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -1,7 +1,6 @@
|
||||
*~
|
||||
\#*
|
||||
.#*
|
||||
*.lock
|
||||
.lake
|
||||
lake-manifest.json
|
||||
/build
|
||||
@@ -21,6 +20,9 @@ settings.json
|
||||
!.claude/settings.json
|
||||
.gdb_history
|
||||
.vscode/*
|
||||
!.vscode/settings.json
|
||||
!.vscode/tasks.json
|
||||
!.vscode/extensions.json
|
||||
script/__pycache__
|
||||
*.produced.out
|
||||
CMakeSettings.json
|
||||
|
||||
5
.vscode/extensions.json
vendored
Normal file
5
.vscode/extensions.json
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"recommendations": [
|
||||
"leanprover.lean4"
|
||||
]
|
||||
}
|
||||
12
.vscode/settings.json
vendored
Normal file
12
.vscode/settings.json
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"files.insertFinalNewline": true,
|
||||
"files.trimTrailingWhitespace": true,
|
||||
// These require the CMake Tools extension (ms-vscode.cmake-tools).
|
||||
"cmake.buildDirectory": "${workspaceFolder}/build/release",
|
||||
"cmake.generator": "Unix Makefiles",
|
||||
"[lean4]": {
|
||||
"editor.rulers": [
|
||||
100
|
||||
]
|
||||
}
|
||||
}
|
||||
34
.vscode/tasks.json
vendored
Normal file
34
.vscode/tasks.json
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
{
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"label": "build",
|
||||
"type": "shell",
|
||||
"command": "make -C build/release -j$(nproc 2>/dev/null || sysctl -n hw.logicalcpu 2>/dev/null || echo 4)",
|
||||
"problemMatcher": [],
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"label": "build-old",
|
||||
"type": "shell",
|
||||
"command": "make -C build/release -j$(nproc 2>/dev/null || sysctl -n hw.logicalcpu 2>/dev/null || echo 4) LAKE_EXTRA_ARGS=--old",
|
||||
"problemMatcher": [],
|
||||
"group": {
|
||||
"kind": "build"
|
||||
}
|
||||
},
|
||||
{
|
||||
"label": "test",
|
||||
"type": "shell",
|
||||
"command": "NPROC=$(nproc 2>/dev/null || sysctl -n hw.logicalcpu 2>/dev/null || echo 4); CTEST_OUTPUT_ON_FAILURE=1 make -C build/release test -j$NPROC ARGS=\"-j$NPROC\"",
|
||||
"problemMatcher": [],
|
||||
"group": {
|
||||
"kind": "test",
|
||||
"isDefault": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -41,7 +41,7 @@ if(NOT (DEFINED STAGE0_CMAKE_EXECUTABLE_SUFFIX))
|
||||
set(STAGE0_CMAKE_EXECUTABLE_SUFFIX "${CMAKE_EXECUTABLE_SUFFIX}")
|
||||
endif()
|
||||
|
||||
# Don't do anything with cadical on wasm
|
||||
# Don't do anything with cadical/leantar on wasm
|
||||
if(NOT CMAKE_SYSTEM_NAME MATCHES "Emscripten")
|
||||
find_program(CADICAL cadical)
|
||||
if(NOT CADICAL)
|
||||
@@ -77,7 +77,44 @@ if(NOT CMAKE_SYSTEM_NAME MATCHES "Emscripten")
|
||||
set(CADICAL ${CMAKE_BINARY_DIR}/cadical/cadical${CMAKE_EXECUTABLE_SUFFIX})
|
||||
list(APPEND EXTRA_DEPENDS cadical)
|
||||
endif()
|
||||
list(APPEND CL_ARGS -DCADICAL=${CADICAL})
|
||||
find_program(LEANTAR leantar)
|
||||
if(NOT LEANTAR)
|
||||
set(LEANTAR_VERSION v0.1.19)
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Windows")
|
||||
set(LEANTAR_ARCHIVE_SUFFIX .zip)
|
||||
set(LEANTAR_TARGET x86_64-pc-windows-msvc)
|
||||
else()
|
||||
set(LEANTAR_ARCHIVE_SUFFIX .tar.gz)
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
|
||||
set(LEANTAR_TARGET_ARCH aarch64)
|
||||
else()
|
||||
set(LEANTAR_TARGET_ARCH x86_64)
|
||||
endif()
|
||||
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
|
||||
set(LEANTAR_TARGET_OS apple-darwin)
|
||||
else()
|
||||
set(LEANTAR_TARGET_OS unknown-linux-musl)
|
||||
endif()
|
||||
set(LEANTAR_TARGET ${LEANTAR_TARGET_ARCH}-${LEANTAR_TARGET_OS})
|
||||
endif()
|
||||
set(
|
||||
LEANTAR
|
||||
${CMAKE_BINARY_DIR}/leantar/leantar-${LEANTAR_VERSION}-${LEANTAR_TARGET}/leantar${CMAKE_EXECUTABLE_SUFFIX}
|
||||
)
|
||||
if(NOT EXISTS "${LEANTAR}")
|
||||
file(
|
||||
DOWNLOAD
|
||||
https://github.com/digama0/leangz/releases/download/${LEANTAR_VERSION}/leantar-${LEANTAR_VERSION}-${LEANTAR_TARGET}${LEANTAR_ARCHIVE_SUFFIX}
|
||||
${CMAKE_BINARY_DIR}/leantar${LEANTAR_ARCHIVE_SUFFIX}
|
||||
)
|
||||
file(
|
||||
ARCHIVE_EXTRACT
|
||||
INPUT ${CMAKE_BINARY_DIR}/leantar${LEANTAR_ARCHIVE_SUFFIX}
|
||||
DESTINATION ${CMAKE_BINARY_DIR}/leantar
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
list(APPEND CL_ARGS -DCADICAL=${CADICAL} -DLEANTAR=${LEANTAR})
|
||||
endif()
|
||||
|
||||
if(USE_MIMALLOC)
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"SMALL_ALLOCATOR": "OFF",
|
||||
"USE_MIMALLOC": "OFF",
|
||||
"BSYMBOLIC": "OFF",
|
||||
"LEAN_TEST_VARS": "MAIN_STACK_SIZE=16000 LSAN_OPTIONS=max_leaks=10"
|
||||
"LEAN_TEST_VARS": "MAIN_STACK_SIZE=16000 TEST_STACK_SIZE=16000 LSAN_OPTIONS=max_leaks=10"
|
||||
},
|
||||
"generator": "Unix Makefiles",
|
||||
"binaryDir": "${sourceDir}/build/sanitize"
|
||||
|
||||
@@ -7,7 +7,7 @@ Helpful links
|
||||
-------
|
||||
|
||||
* [Development Setup](./doc/dev/index.md)
|
||||
* [Testing](./doc/dev/testing.md)
|
||||
* [Testing](./tests/README.md)
|
||||
* [Commit convention](./doc/dev/commit_convention.md)
|
||||
|
||||
Before You Submit a Pull Request (PR):
|
||||
|
||||
206
LICENSES
206
LICENSES
@@ -1370,4 +1370,208 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
SOFTWARE.
|
||||
==============================================================================
|
||||
leantar is by Mario Carneiro and distributed under the Apache 2.0 License:
|
||||
==============================================================================
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
1
doc/.gitignore
vendored
1
doc/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
out
|
||||
@@ -1,7 +1,9 @@
|
||||
# Development Workflow
|
||||
|
||||
If you want to make changes to Lean itself, start by [building Lean](../make/index.md) from a clean checkout to make sure that everything is set up correctly.
|
||||
After that, read on below to find out how to set up your editor for changing the Lean source code, followed by further sections of the development manual where applicable such as on the [test suite](testing.md) and [commit convention](commit_convention.md).
|
||||
After that, read on below to find out how to set up your editor for changing the Lean source code,
|
||||
followed by further sections of the development manual where applicable
|
||||
such as on the [test suite](../../tests/README.md) and [commit convention](commit_convention.md).
|
||||
|
||||
If you are planning to make any changes that may affect the compilation of Lean itself, e.g. changes to the parser, elaborator, or compiler, you should first read about the [bootstrapping pipeline](bootstrap.md).
|
||||
You should not edit the `stage0` directory except using the commands described in that section when necessary.
|
||||
@@ -61,10 +63,10 @@ you can then put `my_name/lean4:my-tag` in your `lean-toolchain` file in a proje
|
||||
|
||||
### VS Code
|
||||
|
||||
There is a `lean.code-workspace` file that correctly sets up VS Code with workspace roots for the stage0/stage1 setup described above as well as with other settings.
|
||||
You should always load it when working on Lean, such as by invoking
|
||||
There is a `.vscode/` directory that correctly sets up VS Code with settings, tasks, and recommended extensions.
|
||||
Simply open the repository folder in VS Code, such as by invoking
|
||||
```
|
||||
code lean.code-workspace
|
||||
code .
|
||||
```
|
||||
on the command line.
|
||||
|
||||
|
||||
@@ -1,142 +0,0 @@
|
||||
# Test Suite
|
||||
|
||||
**Warning:** This document is partially outdated.
|
||||
It describes the old test suite, which is currently in the process of being replaced.
|
||||
The new test suite's documentation can be found at [`tests/README.md`](../../tests/README.md).
|
||||
|
||||
After [building Lean](../make/index.md) you can run all the tests using
|
||||
```
|
||||
cd build/release
|
||||
make test ARGS=-j4
|
||||
```
|
||||
Change the 4 to the maximum number of parallel tests you want to
|
||||
allow. The best choice is the number of CPU cores on your machine as
|
||||
the tests are mostly CPU bound. You can find the number of processors
|
||||
on linux using `nproc` and on Windows it is the `NUMBER_OF_PROCESSORS`
|
||||
environment variable.
|
||||
|
||||
You can run tests after [building a specific stage](bootstrap.md) by
|
||||
adding the `-C stageN` argument. The default when run as above is stage 1. The
|
||||
Lean tests will automatically use that stage's corresponding Lean
|
||||
executables
|
||||
|
||||
Running `make test` will not pick up new test files; run
|
||||
```bash
|
||||
cmake build/release/stage1
|
||||
```
|
||||
to update the list of tests.
|
||||
|
||||
You can also use `ctest` directly if you are in the right folder. So
|
||||
to run stage1 tests with a 300 second timeout run this:
|
||||
|
||||
```bash
|
||||
cd build/release/stage1
|
||||
ctest -j 4 --output-on-failure --timeout 300
|
||||
```
|
||||
Useful `ctest` flags are `-R <name of test>` to run a single test, and
|
||||
`--rerun-failed` to run all tests that failed during the last run.
|
||||
You can also pass `ctest` flags via `make test ARGS="--rerun-failed"`.
|
||||
|
||||
To get verbose output from ctest pass the `--verbose` command line
|
||||
option. Test output is normally suppressed and only summary
|
||||
information is displayed. This option will show all test output.
|
||||
|
||||
## Test Suite Organization
|
||||
|
||||
All these tests are included by [src/shell/CMakeLists.txt](https://github.com/leanprover/lean4/blob/master/src/shell/CMakeLists.txt):
|
||||
|
||||
- [`tests/lean`](https://github.com/leanprover/lean4/tree/master/tests/lean/): contains tests that come equipped with a
|
||||
.lean.expected.out file. The driver script [`test_single.sh`](https://github.com/leanprover/lean4/tree/master/tests/lean/test_single.sh) runs
|
||||
each test and checks the actual output (*.produced.out) with the
|
||||
checked in expected output.
|
||||
|
||||
- [`tests/lean/run`](https://github.com/leanprover/lean4/tree/master/tests/lean/run/): contains tests that are run through the lean
|
||||
command line one file at a time. These tests only look for error
|
||||
codes and do not check the expected output even though output is
|
||||
produced, it is ignored.
|
||||
|
||||
**Note:** Tests in this directory run with `-Dlinter.all=false` to reduce noise.
|
||||
If your test needs to verify linter behavior (e.g., deprecation warnings),
|
||||
explicitly enable the relevant linter with `set_option linter.<name> true`.
|
||||
|
||||
- [`tests/lean/interactive`](https://github.com/leanprover/lean4/tree/master/tests/lean/interactive/): are designed to test server requests at a
|
||||
given position in the input file. Each .lean file contains comments
|
||||
that indicate how to simulate a client request at that position.
|
||||
using a `--^` point to the line position. Example:
|
||||
```lean,ignore
|
||||
open Foo in
|
||||
theorem tst2 (h : a ≤ b) : a + 2 ≤ b + 2 :=
|
||||
Bla.
|
||||
--^ completion
|
||||
```
|
||||
In this example, the test driver [`test_single.sh`](https://github.com/leanprover/lean4/tree/master/tests/lean/interactive/test_single.sh) will simulate an
|
||||
auto-completion request at `Bla.`. The expected output is stored in
|
||||
a .lean.expected.out in the json format that is part of the
|
||||
[Language Server
|
||||
Protocol](https://microsoft.github.io/language-server-protocol/).
|
||||
|
||||
This can also be used to test the following additional requests:
|
||||
```
|
||||
--^ textDocument/hover
|
||||
--^ textDocument/typeDefinition
|
||||
--^ textDocument/definition
|
||||
--^ $/lean/plainGoal
|
||||
--^ $/lean/plainTermGoal
|
||||
--^ insert: ...
|
||||
--^ collectDiagnostics
|
||||
```
|
||||
|
||||
- [`tests/lean/server`](https://github.com/leanprover/lean4/tree/master/tests/lean/server/): Tests more of the Lean `--server` protocol.
|
||||
There are just a few of them, and it uses .log files containing
|
||||
JSON.
|
||||
|
||||
- [`tests/compiler`](https://github.com/leanprover/lean4/tree/master/tests/compiler/): contains tests that will run the Lean compiler and
|
||||
build an executable that is executed and the output is compared to
|
||||
the .lean.expected.out file. This test also contains a subfolder
|
||||
[`foreign`](https://github.com/leanprover/lean4/tree/master/tests/compiler/foreign/) which shows how to extend Lean using C++.
|
||||
|
||||
- [`tests/lean/trust0`](https://github.com/leanprover/lean4/tree/master/tests/lean/trust0): tests that run Lean in a mode that Lean doesn't
|
||||
even trust the .olean files (i.e., trust 0).
|
||||
|
||||
- [`tests/bench`](https://github.com/leanprover/lean4/tree/master/tests/bench/): contains performance tests.
|
||||
|
||||
- [`tests/plugin`](https://github.com/leanprover/lean4/tree/master/tests/plugin/): tests that compiled Lean code can be loaded into
|
||||
`lean` via the `--plugin` command line option.
|
||||
|
||||
## Writing Good Tests
|
||||
|
||||
Every test file should contain:
|
||||
* an initial `/-! -/` module docstring summarizing the test's purpose
|
||||
* a module docstring for each test section that describes what is tested
|
||||
and, if not 100% clear, why that is the desirable behavior
|
||||
|
||||
At the time of writing, most tests do not follow these new guidelines yet.
|
||||
For an example of a conforming test, see [`tests/lean/1971.lean`](https://github.com/leanprover/lean4/tree/master/tests/lean/1971.lean).
|
||||
|
||||
## Fixing Tests
|
||||
|
||||
When the Lean source code or the standard library are modified, some of the
|
||||
tests break because the produced output is slightly different, and we have
|
||||
to reflect the changes in the `.lean.expected.out` files.
|
||||
We should not blindly copy the new produced output since we may accidentally
|
||||
miss a bug introduced by recent changes.
|
||||
The test suite contains commands that allow us to see what changed in a convenient way.
|
||||
First, we must install [meld](http://meldmerge.org/). On Ubuntu, we can do it by simply executing
|
||||
|
||||
```
|
||||
sudo apt-get install meld
|
||||
```
|
||||
|
||||
Now, suppose `bad_class.lean` test is broken. We can see the problem by going to [`tests/lean`](https://github.com/leanprover/lean4/tree/master/tests/lean) directory and
|
||||
executing
|
||||
|
||||
```
|
||||
./test_single.sh -i bad_class.lean
|
||||
```
|
||||
|
||||
When the `-i` option is provided, `meld` is automatically invoked
|
||||
whenever there is discrepancy between the produced and expected
|
||||
outputs. `meld` can also be used to repair the problems.
|
||||
|
||||
In Emacs, we can also execute `M-x lean4-diff-test-file` to check/diff the file of the current buffer.
|
||||
To mass-copy all `.produced.out` files to the respective `.expected.out` file, use `tests/lean/copy-produced`.
|
||||
2
doc/examples/.gitignore
vendored
Normal file
2
doc/examples/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*.out.produced
|
||||
*.exit.produced
|
||||
2
doc/examples/bintree.lean.out.expected
Normal file
2
doc/examples/bintree.lean.out.expected
Normal file
@@ -0,0 +1,2 @@
|
||||
Tree.node (Tree.node (Tree.leaf) 1 "one" (Tree.leaf)) 2 "two" (Tree.node (Tree.leaf) 3 "three" (Tree.leaf))
|
||||
[(1, "one"), (2, "two"), (3, "three")]
|
||||
4
doc/examples/compiler/run_test.sh
Normal file
4
doc/examples/compiler/run_test.sh
Normal file
@@ -0,0 +1,4 @@
|
||||
leanmake --always-make bin
|
||||
|
||||
capture ./build/bin/test hello world
|
||||
check_out_contains "[hello, world]"
|
||||
1
doc/examples/compiler/test.lean.out.expected
Normal file
1
doc/examples/compiler/test.lean.out.expected
Normal file
@@ -0,0 +1 @@
|
||||
[hello, world]
|
||||
3
doc/examples/interp.lean.out.expected
Normal file
3
doc/examples/interp.lean.out.expected
Normal file
@@ -0,0 +1,3 @@
|
||||
30
|
||||
interp.lean:146:4: warning: declaration uses `sorry`
|
||||
3628800
|
||||
2
doc/examples/palindromes.lean.out.expected
Normal file
2
doc/examples/palindromes.lean.out.expected
Normal file
@@ -0,0 +1,2 @@
|
||||
true
|
||||
false
|
||||
2
doc/examples/phoas.lean.out.expected
Normal file
2
doc/examples/phoas.lean.out.expected
Normal file
@@ -0,0 +1,2 @@
|
||||
"(((fun x_1 => (fun x_2 => (x_1 + x_2))) 1) 2)"
|
||||
"((((fun x_1 => (fun x_2 => (x_1 + x_2))) 1) 2) + 5)"
|
||||
4
doc/examples/run_test.sh
Normal file
4
doc/examples/run_test.sh
Normal file
@@ -0,0 +1,4 @@
|
||||
capture_only "$1" \
|
||||
lean -Dlinter.all=false "$1"
|
||||
check_exit_is_success
|
||||
check_out_file
|
||||
@@ -1,4 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
source ../../tests/common.sh
|
||||
|
||||
exec_check_raw lean -Dlinter.all=false "$f"
|
||||
@@ -1,60 +0,0 @@
|
||||
{
|
||||
"folders": [
|
||||
{
|
||||
"path": "."
|
||||
}
|
||||
],
|
||||
"settings": {
|
||||
"files.insertFinalNewline": true,
|
||||
"files.trimTrailingWhitespace": true,
|
||||
"cmake.buildDirectory": "${workspaceFolder}/build/release",
|
||||
"cmake.generator": "Unix Makefiles",
|
||||
"[markdown]": {
|
||||
"rewrap.wrappingColumn": 70
|
||||
},
|
||||
"[lean4]": {
|
||||
"editor.rulers": [
|
||||
100
|
||||
]
|
||||
}
|
||||
},
|
||||
"tasks": {
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"label": "build",
|
||||
"type": "shell",
|
||||
"command": "make -C build/release -j$(nproc 2>/dev/null || sysctl -n hw.logicalcpu 2>/dev/null || echo 4)",
|
||||
"problemMatcher": [],
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"label": "build-old",
|
||||
"type": "shell",
|
||||
"command": "make -C build/release -j$(nproc 2>/dev/null || sysctl -n hw.logicalcpu 2>/dev/null || echo 4) LAKE_EXTRA_ARGS=--old",
|
||||
"problemMatcher": [],
|
||||
"group": {
|
||||
"kind": "build"
|
||||
}
|
||||
},
|
||||
{
|
||||
"label": "test",
|
||||
"type": "shell",
|
||||
"command": "NPROC=$(nproc 2>/dev/null || sysctl -n hw.logicalcpu 2>/dev/null || echo 4); CTEST_OUTPUT_ON_FAILURE=1 make -C build/release test -j$NPROC ARGS=\"-j$NPROC\"",
|
||||
"problemMatcher": [],
|
||||
"group": {
|
||||
"kind": "test",
|
||||
"isDefault": true
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"extensions": {
|
||||
"recommendations": [
|
||||
"leanprover.lean4"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2015 Microsoft Corporation. All rights reserved.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2015 Microsoft Corporation. All rights reserved.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
# Profile a Lean binary with demangled names.
|
||||
#
|
||||
# Usage:
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Lean name demangler.
|
||||
Lean name demangler — thin wrapper around the Lean CLI tool.
|
||||
|
||||
Demangles C symbol names produced by the Lean 4 compiler back into
|
||||
readable Lean hierarchical names.
|
||||
Spawns ``lean --run lean_demangle_cli.lean`` as a persistent subprocess
|
||||
and communicates via stdin/stdout pipes. This ensures a single source
|
||||
of truth for demangling logic (the Lean implementation in
|
||||
``Lean.Compiler.NameDemangling``).
|
||||
|
||||
Usage as a filter (like c++filt):
|
||||
echo "l_Lean_Meta_Sym_main" | python lean_demangle.py
|
||||
@@ -13,767 +15,68 @@ Usage as a module:
|
||||
print(demangle_lean_name("l_Lean_Meta_Sym_main"))
|
||||
"""
|
||||
|
||||
import atexit
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# String.mangle / unmangle
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _is_ascii_alnum(ch):
|
||||
"""Check if ch is an ASCII letter or digit (matching Lean's isAlpha/isDigit)."""
|
||||
return ('a' <= ch <= 'z') or ('A' <= ch <= 'Z') or ('0' <= ch <= '9')
|
||||
|
||||
|
||||
def mangle_string(s):
|
||||
"""Port of Lean's String.mangle: escape a single string for C identifiers."""
|
||||
result = []
|
||||
for ch in s:
|
||||
if _is_ascii_alnum(ch):
|
||||
result.append(ch)
|
||||
elif ch == '_':
|
||||
result.append('__')
|
||||
else:
|
||||
code = ord(ch)
|
||||
if code < 0x100:
|
||||
result.append('_x' + format(code, '02x'))
|
||||
elif code < 0x10000:
|
||||
result.append('_u' + format(code, '04x'))
|
||||
else:
|
||||
result.append('_U' + format(code, '08x'))
|
||||
return ''.join(result)
|
||||
|
||||
|
||||
def _parse_hex(s, pos, n):
|
||||
"""Parse n lowercase hex digits at pos. Returns (new_pos, value) or None."""
|
||||
if pos + n > len(s):
|
||||
return None
|
||||
val = 0
|
||||
for i in range(n):
|
||||
c = s[pos + i]
|
||||
if '0' <= c <= '9':
|
||||
val = (val << 4) | (ord(c) - ord('0'))
|
||||
elif 'a' <= c <= 'f':
|
||||
val = (val << 4) | (ord(c) - ord('a') + 10)
|
||||
else:
|
||||
return None
|
||||
return (pos + n, val)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Name mangling (for round-trip verification)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _check_disambiguation(m):
|
||||
"""Port of Lean's checkDisambiguation: does mangled string m need a '00' prefix?"""
|
||||
pos = 0
|
||||
while pos < len(m):
|
||||
ch = m[pos]
|
||||
if ch == '_':
|
||||
pos += 1
|
||||
continue
|
||||
if ch == 'x':
|
||||
return _parse_hex(m, pos + 1, 2) is not None
|
||||
if ch == 'u':
|
||||
return _parse_hex(m, pos + 1, 4) is not None
|
||||
if ch == 'U':
|
||||
return _parse_hex(m, pos + 1, 8) is not None
|
||||
if '0' <= ch <= '9':
|
||||
return True
|
||||
return False
|
||||
# all underscores or empty
|
||||
return True
|
||||
|
||||
|
||||
def _need_disambiguation(prev_component, mangled_next):
|
||||
"""Port of Lean's needDisambiguation."""
|
||||
# Check if previous component (as a string) ends with '_'
|
||||
prev_ends_underscore = (isinstance(prev_component, str) and
|
||||
len(prev_component) > 0 and
|
||||
prev_component[-1] == '_')
|
||||
return prev_ends_underscore or _check_disambiguation(mangled_next)
|
||||
|
||||
|
||||
def mangle_name(components, prefix="l_"):
|
||||
"""
|
||||
Mangle a list of name components (str or int) into a C symbol.
|
||||
Port of Lean's Name.mangle.
|
||||
"""
|
||||
if not components:
|
||||
return prefix
|
||||
|
||||
parts = []
|
||||
prev = None
|
||||
for i, comp in enumerate(components):
|
||||
if isinstance(comp, int):
|
||||
if i == 0:
|
||||
parts.append(str(comp) + '_')
|
||||
else:
|
||||
parts.append('_' + str(comp) + '_')
|
||||
else:
|
||||
m = mangle_string(comp)
|
||||
if i == 0:
|
||||
if _check_disambiguation(m):
|
||||
parts.append('00' + m)
|
||||
else:
|
||||
parts.append(m)
|
||||
else:
|
||||
if _need_disambiguation(prev, m):
|
||||
parts.append('_00' + m)
|
||||
else:
|
||||
parts.append('_' + m)
|
||||
prev = comp
|
||||
|
||||
return prefix + ''.join(parts)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Name demangling
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def demangle_body(s):
    """
    Demangle a string produced by Name.mangleAux (without prefix).
    Returns a list of components (str or int).

    This is a faithful port of Lean's Name.demangleAux from NameMangling.lean.

    The three nested helpers mirror the parser states of the Lean original:
      * name_start    -- positioned at the start of a fresh component
      * decode_num    -- inside the digit run of a numeric component
      * demangle_main -- inside a string component, with a count of pending
                         underscores that have not yet been resolved into
                         literals, escapes, or a separator
    They hand control to each other tail-call style, exactly as in Lean.
    """
    components = []
    length = len(s)

    def emit(comp):
        # Append one finished component (str or int) to the result list.
        components.append(comp)

    def decode_num(pos, n):
        """Parse remaining digits, emit numeric component, continue."""
        while pos < length:
            ch = s[pos]
            if '0' <= ch <= '9':
                n = n * 10 + (ord(ch) - ord('0'))
                pos += 1
            else:
                # Expect '_' (trailing underscore of numeric encoding)
                pos += 1  # skip '_'
                emit(n)
                if pos >= length:
                    return pos
                # Skip separator '_' and go to name_start
                pos += 1
                return name_start(pos)
        # End of string: number ran to the end without its trailing '_'.
        emit(n)
        return pos

    def name_start(pos):
        """Start parsing a new name component."""
        if pos >= length:
            return pos
        ch = s[pos]
        pos += 1
        if '0' <= ch <= '9':
            # Check for '00' disambiguation: a '00' here means the component
            # is a *string* that merely begins with a digit, not a number.
            if ch == '0' and pos < length and s[pos] == '0':
                pos += 1
                return demangle_main(pos, "", 0)
            else:
                return decode_num(pos, ord(ch) - ord('0'))
        elif ch == '_':
            # Leading '_' starts a string component with one pending underscore.
            return demangle_main(pos, "", 1)
        else:
            return demangle_main(pos, ch, 0)

    def demangle_main(pos, acc, ucount):
        """Main demangling loop.

        acc    -- characters accumulated for the current string component
        ucount -- length of the current run of unresolved '_' characters;
                  each *pair* decodes to a literal '_', a leftover odd one
                  is either a component separator or introduces an escape.
        """
        while pos < length:
            ch = s[pos]
            pos += 1

            if ch == '_':
                ucount += 1
                continue

            if ucount % 2 == 0:
                # Even underscores: literal underscores in component name
                acc += '_' * (ucount // 2) + ch
                ucount = 0
                continue

            # Odd ucount: separator or escape
            if '0' <= ch <= '9':
                # End current str component, start number
                emit(acc + '_' * (ucount // 2))
                if ch == '0' and pos < length and s[pos] == '0':
                    # '00' disambiguation after a separator: next component
                    # is a string that starts with a digit.
                    pos += 1
                    return demangle_main(pos, "", 0)
                else:
                    return decode_num(pos, ord(ch) - ord('0'))

            # Try hex escapes: _xHH (ASCII), _uHHHH (BMP), _UHHHHHHHH (astral).
            if ch == 'x':
                result = _parse_hex(s, pos, 2)
                if result is not None:
                    new_pos, val = result
                    acc += '_' * (ucount // 2) + chr(val)
                    pos = new_pos
                    ucount = 0
                    continue

            if ch == 'u':
                result = _parse_hex(s, pos, 4)
                if result is not None:
                    new_pos, val = result
                    acc += '_' * (ucount // 2) + chr(val)
                    pos = new_pos
                    ucount = 0
                    continue

            if ch == 'U':
                result = _parse_hex(s, pos, 8)
                if result is not None:
                    new_pos, val = result
                    acc += '_' * (ucount // 2) + chr(val)
                    pos = new_pos
                    ucount = 0
                    continue

            # Name separator: finish the current component; the next one
            # starts with any leftover literal underscores plus ch.
            emit(acc)
            acc = '_' * (ucount // 2) + ch
            ucount = 0

        # End of string: flush trailing literal underscores and component.
        acc += '_' * (ucount // 2)
        if acc:
            emit(acc)
        return pos

    name_start(0)
    return components
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Prefix handling for lp_ (package prefix)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _is_valid_string_mangle(s):
    """Check if s is a valid output of String.mangle (no trailing bare _)."""
    i = 0
    n = len(s)
    while i < n:
        c = s[i]
        if _is_ascii_alnum(c):
            i += 1
            continue
        if c != '_':
            # Anything except alphanumerics and escapes is invalid.
            return False
        if i + 1 >= n:
            # A lone '_' at the very end cannot be a complete escape.
            return False
        follower = s[i + 1]
        if follower == '_':
            # '__' encodes a literal underscore.
            i += 2
            continue
        # '_x', '_u', '_U' escapes carry 2, 4 or 8 hex digits respectively.
        digits = {'x': 2, 'u': 4, 'U': 8}.get(follower)
        if digits is None:
            return False
        parsed = _parse_hex(s, i + 2, digits)
        if parsed is None:
            return False
        i = parsed[0]
    return True
|
||||
|
||||
|
||||
def _skip_string_mangle(s, pos):
    """
    Skip past a String.mangle output in s starting at pos.
    Returns the position after the mangled string (where we expect the separator '_').
    This is a greedy scan.
    """
    end = len(s)
    while pos < end:
        c = s[pos]
        if _is_ascii_alnum(c):
            pos += 1
            continue
        if c != '_' or pos + 1 >= end:
            # Non-mangle character, or '_' at the very end: stop here.
            return pos
        follower = s[pos + 1]
        if follower == '_':
            # Escaped literal underscore.
            pos += 2
            continue
        # Hex escapes: _xHH / _uHHHH / _UHHHHHHHH.
        digits = {'x': 2, 'u': 4, 'U': 8}.get(follower)
        parsed = _parse_hex(s, pos + 2, digits) if digits else None
        if parsed is None:
            return pos  # bare '_': separator
        pos = parsed[0]
    return pos
|
||||
|
||||
|
||||
def _find_lp_body(s):
    """
    Given s = everything after 'lp_' in a symbol, find where the declaration
    body (Name.mangleAux output) starts.
    Returns the start index of the body within s, or None.

    Strategy: try all candidate split points where the package part is a valid
    String.mangle output and the body round-trips. Prefer the longest valid
    package name (most specific match).
    """
    # Every '_' in s could in principle be the package/body separator.
    separators = [i for i, c in enumerate(s) if c == '_']

    # Keep each split whose package half is a well-formed String.mangle
    # output and whose body half round-trips through demangle/mangle.
    valid = []
    for cut in separators:
        pkg = s[:cut]
        body = s[cut + 1:]
        if not pkg or not body:
            continue
        if not _is_valid_string_mangle(pkg):
            continue
        comps = demangle_body(body)
        if not comps:
            continue
        if mangle_name(comps, prefix="") != body:
            continue
        head = comps[0]
        # Heuristic: a declaration usually lives in an Uppercase namespace.
        starts_upper = bool(isinstance(head, str) and head and head[0].isupper())
        valid.append((cut, starts_upper))

    if valid:
        # Among splits where the first decl component starts uppercase, pick
        # the longest package; otherwise fall back to the shortest package.
        uppers = [entry for entry in valid if entry[1]]
        if uppers:
            chosen = max(uppers, key=lambda e: e[0])
        else:
            chosen = min(valid, key=lambda e: e[0])
        return chosen[0] + 1

    # Fallback: greedy String.mangle scan up to the first bare separator.
    scan_end = _skip_string_mangle(s, 0)
    if scan_end < len(s) and s[scan_end] == '_':
        return scan_end + 1

    return None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Format name components for display
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def format_name(components):
    """Render a list of name components as a dot-separated display string."""
    pieces = [str(part) for part in components]
    return '.'.join(pieces)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Human-friendly postprocessing
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Compiler-generated suffix components — exact match.
# Maps a raw demangled component to the short flag label that
# postprocess_name() shows in brackets; '\u2193' is a downwards arrow.
_SUFFIX_FLAGS_EXACT = {
    '_redArg': 'arity\u2193',
    '_boxed': 'boxed',
    '_impl': 'impl',
}

# Compiler-generated suffix prefixes — match with optional _N index
# e.g., _lam, _lam_0, _lam_3, _lambda_0, _closed_2
# '\u03bb' is the Greek letter lambda.
_SUFFIX_FLAGS_PREFIX = {
    '_lam': '\u03bb',
    '_lambda': '\u03bb',
    '_elam': '\u03bb',
    '_jp': 'jp',
    '_closed': 'closed',
}
|
||||
|
||||
|
||||
def _match_suffix(component):
    """
    Check if a string component is a compiler-generated suffix.
    Returns the flag label or None.

    Handles both exact matches (_redArg, _boxed) and indexed suffixes
    (_lam_0, _lambda_2, _closed_0) produced by appendIndexAfter.
    """
    if not isinstance(component, str):
        return None
    exact = _SUFFIX_FLAGS_EXACT.get(component)
    if exact is not None:
        return exact
    bare = _SUFFIX_FLAGS_PREFIX.get(component)
    if bare is not None:
        return bare
    # Indexed form: a known stem followed by '_' and a decimal index.
    for stem, label in _SUFFIX_FLAGS_PREFIX.items():
        head = stem + '_'
        if component.startswith(head) and component[len(head):].isdigit():
            return label
    return None
|
||||
|
||||
|
||||
def _strip_private(components):
|
||||
"""Strip _private.Module.0. prefix. Returns (stripped_parts, is_private)."""
|
||||
if (len(components) >= 3 and isinstance(components[0], str) and
|
||||
components[0] == '_private'):
|
||||
for i in range(1, len(components)):
|
||||
if components[i] == 0:
|
||||
if i + 1 < len(components):
|
||||
return components[i + 1:], True
|
||||
break
|
||||
return components, False
|
||||
|
||||
|
||||
def _strip_spec_suffixes(components):
|
||||
"""Strip trailing spec_N components (from appendIndexAfter)."""
|
||||
parts = list(components)
|
||||
while parts and isinstance(parts[-1], str) and parts[-1].startswith('spec_'):
|
||||
rest = parts[-1][5:]
|
||||
if rest.isdigit():
|
||||
parts.pop()
|
||||
else:
|
||||
break
|
||||
return parts
|
||||
|
||||
|
||||
def _is_spec_index(component):
|
||||
"""Check if a component is a spec_N index (from appendIndexAfter)."""
|
||||
return (isinstance(component, str) and
|
||||
component.startswith('spec_') and component[5:].isdigit())
|
||||
|
||||
|
||||
def _parse_spec_entries(rest):
|
||||
"""Parse _at_..._spec pairs into separate spec context entries.
|
||||
|
||||
Given components starting from the first _at_, returns:
|
||||
- entries: list of component lists, one per _at_..._spec block
|
||||
- remaining: components after the last _spec N (trailing suffixes)
|
||||
"""
|
||||
entries = []
|
||||
current_ctx = None
|
||||
remaining = []
|
||||
skip_next = False
|
||||
|
||||
for p in rest:
|
||||
if skip_next:
|
||||
skip_next = False
|
||||
continue
|
||||
if isinstance(p, str) and p == '_at_':
|
||||
if current_ctx is not None:
|
||||
entries.append(current_ctx)
|
||||
current_ctx = []
|
||||
continue
|
||||
if isinstance(p, str) and p == '_spec':
|
||||
if current_ctx is not None:
|
||||
entries.append(current_ctx)
|
||||
current_ctx = None
|
||||
skip_next = True
|
||||
continue
|
||||
if isinstance(p, str) and p.startswith('_spec'):
|
||||
if current_ctx is not None:
|
||||
entries.append(current_ctx)
|
||||
current_ctx = None
|
||||
continue
|
||||
if current_ctx is not None:
|
||||
current_ctx.append(p)
|
||||
else:
|
||||
remaining.append(p)
|
||||
|
||||
if current_ctx is not None:
|
||||
entries.append(current_ctx)
|
||||
|
||||
return entries, remaining
|
||||
|
||||
|
||||
def _process_spec_context(components):
    """Process a spec context into a clean name and its flags.

    Returns (name_parts, flags) where name_parts are the cleaned components
    and flags is a deduplicated list of flag labels from compiler suffixes.
    """
    stripped, _ = _strip_private(list(components))

    kept = []
    labels = []
    already = set()

    for comp in stripped:
        label = _match_suffix(comp)
        if label is not None:
            # Fold compiler suffixes into flags, first occurrence only.
            if label not in already:
                labels.append(label)
                already.add(label)
        elif not _is_spec_index(comp):
            kept.append(comp)
        # spec_N indices are dropped entirely.

    return kept, labels
|
||||
|
||||
|
||||
def postprocess_name(components):
    """
    Transform raw demangled components into a human-friendly display string.

    Applies:
    - Private name cleanup: _private.Module.0.Name.foo -> Name.foo [private]
    - Hygienic name cleanup: strips _@.module._hygCtx._hyg.N
    - Suffix folding: _redArg, _boxed, _lam_0, etc. -> [flags]
    - Specialization: f._at_.g._spec.N -> f spec at g
      Shown after base [flags], with context flags: spec at g[ctx_flags]
    """
    if not components:
        return ""

    parts = list(components)
    flags = []          # flag labels folded out of trailing suffixes
    spec_entries = []   # (ctx_name_parts, ctx_flags) per _at_..._spec block

    # --- Strip _private prefix ---
    parts, is_private = _strip_private(parts)

    # --- Strip hygienic suffixes: everything from _@ onward ---
    at_idx = None
    for i, p in enumerate(parts):
        if isinstance(p, str) and p.startswith('_@'):
            at_idx = i
            break
    if at_idx is not None:
        parts = parts[:at_idx]

    # --- Handle specialization: _at_ ... _spec N ---
    at_positions = [i for i, p in enumerate(parts)
                    if isinstance(p, str) and p == '_at_']
    if at_positions:
        first_at = at_positions[0]
        base = parts[:first_at]
        rest = parts[first_at:]

        entries, remaining = _parse_spec_entries(rest)
        for ctx_components in entries:
            ctx_name, ctx_flags = _process_spec_context(ctx_components)
            if ctx_name or ctx_flags:
                spec_entries.append((ctx_name, ctx_flags))

        # Anything after the last _spec (e.g. _boxed) stays attached to
        # the base name so the suffix folding below can pick it up.
        parts = base + remaining

    # --- Collect suffix flags from the end ---
    while parts:
        last = parts[-1]
        flag = _match_suffix(last)
        if flag is not None:
            flags.append(flag)
            parts.pop()
        elif isinstance(last, int) and len(parts) >= 2:
            # A trailing int may be the index of a suffix pair like
            # ('_lambda', 0); fold both away together.
            prev_flag = _match_suffix(parts[-2])
            if prev_flag is not None:
                flags.append(prev_flag)
                parts.pop()  # remove the number
                parts.pop()  # remove the suffix
            else:
                break
        else:
            break

    if is_private:
        flags.append('private')

    # --- Format result ---
    name = '.'.join(str(c) for c in parts) if parts else '?'
    result = name
    if flags:
        flag_str = ', '.join(flags)
        result += f' [{flag_str}]'

    for ctx_name, ctx_flags in spec_entries:
        ctx_str = '.'.join(str(c) for c in ctx_name) if ctx_name else '?'
        if ctx_flags:
            ctx_flag_str = ', '.join(ctx_flags)
            result += f' spec at {ctx_str}[{ctx_flag_str}]'
        else:
            result += f' spec at {ctx_str}'

    return result
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main demangling entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def demangle_lean_name_raw(mangled):
    """
    Demangle a Lean C symbol, preserving all internal name components.

    Returns the exact demangled name with all compiler-generated suffixes
    intact. Use demangle_lean_name() for human-friendly output.
    """
    try:
        result = _demangle_lean_name_inner(mangled, human_friendly=False)
    except Exception:
        # Malformed or non-Lean input: pass the symbol through unchanged.
        return mangled
    return result
|
||||
# Handle to the persistent Lean demangler subprocess; started lazily by
# _get_process() and torn down by _cleanup().
_process = None
# Directory containing this script, used to locate the companion Lean CLI.
_script_dir = os.path.dirname(os.path.abspath(__file__))
_cli_script = os.path.join(_script_dir, "lean_demangle_cli.lean")
|
||||
|
||||
|
||||
def _get_process():
    """Get or create the persistent Lean demangler subprocess."""
    global _process
    alive = _process is not None and _process.poll() is None
    if alive:
        return _process

    # Honour $LEAN so callers can point at a specific toolchain binary.
    lean_exe = os.environ.get("LEAN", "lean")
    _process = subprocess.Popen(
        [lean_exe, "--run", _cli_script],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
        text=True,
        bufsize=1,  # line buffered
    )
    atexit.register(_cleanup)
    return _process
|
||||
|
||||
|
||||
def _cleanup():
    """Shut down the demangler subprocess, killing it if it will not exit."""
    global _process
    if _process is None:
        return
    try:
        # Closing stdin lets the child see EOF and exit on its own.
        _process.stdin.close()
        _process.wait(timeout=5)
    except Exception:
        _process.kill()
    _process = None
|
||||
|
||||
|
||||
def demangle_lean_name(mangled):
    """
    Demangle a C symbol name produced by the Lean 4 compiler.

    Returns a human-friendly demangled name with compiler suffixes folded
    into readable flags, or the original string if it is not a Lean symbol.
    Use demangle_lean_name_raw() to preserve all internal components.
    """
    # NOTE: an earlier revision piped symbols to a persistent Lean
    # subprocess here; that code sat unreachable after the `return` below
    # (merge residue) and has been removed. _get_process()/_cleanup()
    # remain available for callers that want the subprocess path.
    try:
        return _demangle_lean_name_inner(mangled, human_friendly=True)
    except Exception:
        # A demangler used as a filter must never raise: pass unknown or
        # malformed symbols through unchanged.
        return mangled
|
||||
|
||||
|
||||
def _demangle_lean_name_inner(mangled, human_friendly=True):
|
||||
"""Inner demangle that may raise on malformed input."""
|
||||
|
||||
if mangled == "_lean_main":
|
||||
return "[lean] main"
|
||||
|
||||
# Handle lean_ runtime functions
|
||||
if human_friendly and mangled.startswith("lean_apply_"):
|
||||
rest = mangled[11:]
|
||||
if rest.isdigit():
|
||||
return f"<apply/{rest}>"
|
||||
|
||||
# Strip .cold.N suffix (LLVM linker cold function clones)
|
||||
cold_suffix = ""
|
||||
core = mangled
|
||||
dot_pos = core.find('.cold.')
|
||||
if dot_pos >= 0:
|
||||
cold_suffix = " " + core[dot_pos:]
|
||||
core = core[:dot_pos]
|
||||
elif core.endswith('.cold'):
|
||||
cold_suffix = " .cold"
|
||||
core = core[:-5]
|
||||
|
||||
result = _demangle_core(core, human_friendly)
|
||||
if result is None:
|
||||
return mangled
|
||||
return result + cold_suffix
|
||||
|
||||
|
||||
def _demangle_core(mangled, human_friendly=True):
    """Demangle a symbol without .cold suffix. Returns None if not a Lean name."""

    fmt = postprocess_name if human_friendly else format_name

    def _decorate(name, pkg):
        # Append the "(pkg)" note when the symbol carried a package prefix.
        return f"{name} ({pkg})" if pkg else name

    # _init_ prefix: per-declaration initializer.
    if mangled.startswith("_init_"):
        tail = mangled[len("_init_"):]
        body, pkg = _strip_lean_prefix(tail)
        if body is None:
            return None
        comps = demangle_body(body)
        if not comps:
            return None
        return "[init] " + _decorate(fmt(comps), pkg)

    # initialize_ prefix (module init functions)
    if mangled.startswith("initialize_"):
        tail = mangled[len("initialize_"):]
        # With package: initialize_lp_{pkg}_{body} or initialize_l_{body}
        body, pkg = _strip_lean_prefix(tail)
        if body is not None:
            comps = demangle_body(body)
            if comps:
                return "[module_init] " + _decorate(fmt(comps), pkg)
        # Without package: initialize_{Name.mangleAux(moduleName)}
        if tail:
            comps = demangle_body(tail)
            if comps:
                return f"[module_init] {fmt(comps)}"
        return None

    # Plain l_ or lp_ prefix: an ordinary declaration.
    body, pkg = _strip_lean_prefix(mangled)
    if body is None:
        return None
    comps = demangle_body(body)
    if not comps:
        return None
    return _decorate(fmt(comps), pkg)
|
||||
|
||||
|
||||
def _strip_lean_prefix(s):
|
||||
"""
|
||||
Strip the l_ or lp_ prefix from a mangled symbol.
|
||||
Returns (body, pkg_display) where body is the Name.mangleAux output
|
||||
and pkg_display is None or a string describing the package.
|
||||
Returns (None, None) if the string doesn't have a recognized prefix.
|
||||
"""
|
||||
if s.startswith("l_"):
|
||||
return (s[2:], None)
|
||||
|
||||
if s.startswith("lp_"):
|
||||
after_lp = s[3:]
|
||||
body_start = _find_lp_body(after_lp)
|
||||
if body_start is not None:
|
||||
pkg_mangled = after_lp[:body_start - 1]
|
||||
# Unmangle the package name
|
||||
pkg_components = demangle_body(pkg_mangled)
|
||||
if pkg_components and len(pkg_components) == 1 and isinstance(pkg_components[0], str):
|
||||
pkg_display = pkg_components[0]
|
||||
else:
|
||||
pkg_display = pkg_mangled
|
||||
return (after_lp[body_start:], pkg_display)
|
||||
# Fallback: treat everything after lp_ as body
|
||||
return (after_lp, "?")
|
||||
|
||||
return (None, None)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CLI
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def main():
    """CLI entry point: demangle argument names, or filter stdin like c++filt.

    An earlier merge left a second, duplicate stdin loop (and a stray
    docstring statement) after the argparse-based implementation; that dead
    residue has been removed — the argparse version below is the single
    source of truth.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description="Demangle Lean 4 C symbol names (like c++filt for Lean)")
    parser.add_argument('names', nargs='*',
                        help='Names to demangle (reads stdin if none given)')
    parser.add_argument('--raw', action='store_true',
                        help='Output exact demangled names without postprocessing')
    args = parser.parse_args()

    # --raw bypasses the human-friendly postprocessing.
    demangle = demangle_lean_name_raw if args.raw else demangle_lean_name

    if args.names:
        for name in args.names:
            print(demangle(name))
    else:
        for line in sys.stdin:
            print(demangle(line.rstrip('\n')))
|
||||
|
||||
|
||||
# Run the CLI when executed as a script. (A merge had left two nested,
# redundant __main__ guards; collapsed to one.)
if __name__ == '__main__':
    main()
|
||||
|
||||
32
script/profiler/lean_demangle_cli.lean
Normal file
32
script/profiler/lean_demangle_cli.lean
Normal file
@@ -0,0 +1,32 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO, LLC. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Kim Morrison
|
||||
-/
|
||||
module
|
||||
|
||||
import Lean.Compiler.NameDemangling
|
||||
|
||||
/-!
|
||||
Lean name demangler CLI tool. Reads mangled symbol names from stdin (one per
|
||||
line) and writes demangled names to stdout. Non-Lean symbols pass through
|
||||
unchanged. Like `c++filt` but for Lean names.
|
||||
|
||||
Usage:
|
||||
echo "l_Lean_Meta_foo" | lean --run lean_demangle_cli.lean
|
||||
cat symbols.txt | lean --run lean_demangle_cli.lean
|
||||
-/
|
||||
|
||||
open Lean.Name.Demangle
|
||||
|
||||
/-- Read mangled symbols from stdin, one per line, and write the demangled
form to stdout; symbols `demangleSymbol` does not recognize are echoed
unchanged. Terminates on end of input (`getLine` returns the empty
string) or on a blank line. -/
def main : IO Unit := do
  let stdin ← IO.getStdin
  let stdout ← IO.getStdout
  repeat do
    let line ← stdin.getLine
    -- Empty result means EOF (a blank input line also stops the loop).
    if line.isEmpty then break
    let sym := line.trimRight
    match demangleSymbol sym with
    | some s => stdout.putStrLn s
    | none => stdout.putStrLn sym
  stdout.flush
|
||||
@@ -1,670 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Tests for the Lean name demangler."""
|
||||
|
||||
import unittest
|
||||
import json
|
||||
import gzip
|
||||
import tempfile
|
||||
import os
|
||||
|
||||
from lean_demangle import (
|
||||
mangle_string, mangle_name, demangle_body, format_name,
|
||||
demangle_lean_name, demangle_lean_name_raw, postprocess_name,
|
||||
_parse_hex, _check_disambiguation,
|
||||
)
|
||||
|
||||
|
||||
class TestStringMangle(unittest.TestCase):
    """Test String.mangle (character-level escaping).

    The expected values below document the scheme: alphanumerics pass
    through, '_' doubles, other ASCII becomes _xHH, BMP characters become
    _uHHHH, and astral characters become _UHHHHHHHH.
    """

    def test_alphanumeric(self):
        # Plain ASCII alphanumerics are untouched.
        self.assertEqual(mangle_string("hello"), "hello")
        self.assertEqual(mangle_string("abc123"), "abc123")

    def test_underscore(self):
        # Each literal '_' is escaped by doubling.
        self.assertEqual(mangle_string("a_b"), "a__b")
        self.assertEqual(mangle_string("_"), "__")
        self.assertEqual(mangle_string("__"), "____")

    def test_special_chars(self):
        # Non-alphanumeric ASCII uses a two-digit _x escape ('.' = 0x2e).
        self.assertEqual(mangle_string("."), "_x2e")
        self.assertEqual(mangle_string("a.b"), "a_x2eb")

    def test_unicode(self):
        # BMP code points use _u, astral code points use _U.
        self.assertEqual(mangle_string("\u03bb"), "_u03bb")
        self.assertEqual(mangle_string("\U0001d55c"), "_U0001d55c")

    def test_empty(self):
        self.assertEqual(mangle_string(""), "")
|
||||
|
||||
|
||||
class TestNameMangle(unittest.TestCase):
    """Test Name.mangle (hierarchical name mangling).

    Components join with '_' under an 'l_' prefix; numeric components get
    a trailing '_', and components that would be ambiguous after mangling
    are preceded by a '00' disambiguator.
    """

    def test_simple(self):
        self.assertEqual(mangle_name(["Lean", "Meta", "Sym", "main"]),
                         "l_Lean_Meta_Sym_main")

    def test_single_component(self):
        self.assertEqual(mangle_name(["main"]), "l_main")

    def test_numeric_component(self):
        # The numeric 0 encodes as "0_", producing the "_0__" run.
        self.assertEqual(
            mangle_name(["_private", "Lean", "Meta", "Basic", 0,
                         "Lean", "Meta", "withMVarContextImp"]),
            "l___private_Lean_Meta_Basic_0__Lean_Meta_withMVarContextImp")

    def test_component_with_underscore(self):
        self.assertEqual(mangle_name(["a_b"]), "l_a__b")
        self.assertEqual(mangle_name(["a_b", "c"]), "l_a__b_c")

    def test_disambiguation_digit_start(self):
        # A component that starts with a digit needs the '00' marker so it
        # is not parsed back as a number.
        self.assertEqual(mangle_name(["0foo"]), "l_000foo")

    def test_disambiguation_escape_start(self):
        # A component that looks like a hex escape ('x27') needs '00' too.
        self.assertEqual(mangle_name(["a", "x27"]), "l_a_00x27")

    def test_numeric_root(self):
        self.assertEqual(mangle_name([42]), "l_42_")
        self.assertEqual(mangle_name([42, "foo"]), "l_42__foo")

    def test_component_ending_with_underscore(self):
        # 'a_' mangles to 'a__'; the next component takes '00' so the
        # separator is not folded into the underscore run.
        self.assertEqual(mangle_name(["a_", "b"]), "l_a___00b")

    def test_custom_prefix(self):
        self.assertEqual(mangle_name(["foo"], prefix="lp_pkg_"),
                         "lp_pkg_foo")
|
||||
|
||||
|
||||
class TestDemangleBody(unittest.TestCase):
    """Test demangle_body (the core Name.demangleAux algorithm).

    Each test is the inverse of a mangling case: the input is raw
    Name.mangleAux output (no 'l_' prefix) and the expected value is the
    component list (str or int).
    """

    def test_simple(self):
        self.assertEqual(demangle_body("Lean_Meta_Sym_main"),
                         ["Lean", "Meta", "Sym", "main"])

    def test_single(self):
        self.assertEqual(demangle_body("main"), ["main"])

    def test_empty(self):
        self.assertEqual(demangle_body(""), [])

    def test_underscore_in_component(self):
        # '__' decodes to a literal underscore inside a component.
        self.assertEqual(demangle_body("a__b"), ["a_b"])
        self.assertEqual(demangle_body("a__b_c"), ["a_b", "c"])

    def test_numeric_component(self):
        self.assertEqual(demangle_body("foo_42__bar"), ["foo", 42, "bar"])

    def test_numeric_root(self):
        self.assertEqual(demangle_body("42_"), [42])

    def test_numeric_at_end(self):
        self.assertEqual(demangle_body("foo_42_"), ["foo", 42])

    def test_disambiguation_00(self):
        # '00' marks a string component that would otherwise parse as an
        # escape or number.
        self.assertEqual(demangle_body("a_00x27"), ["a", "x27"])

    def test_disambiguation_00_at_root(self):
        self.assertEqual(demangle_body("000foo"), ["0foo"])

    def test_hex_escape_x(self):
        self.assertEqual(demangle_body("a_x2eb"), ["a.b"])

    def test_hex_escape_u(self):
        self.assertEqual(demangle_body("_u03bb"), ["\u03bb"])

    def test_hex_escape_U(self):
        self.assertEqual(demangle_body("_U0001d55c"), ["\U0001d55c"])

    def test_private_name(self):
        body = "__private_Lean_Meta_Basic_0__Lean_Meta_withMVarContextImp"
        self.assertEqual(demangle_body(body),
                         ["_private", "Lean", "Meta", "Basic", 0,
                          "Lean", "Meta", "withMVarContextImp"])

    def test_boxed_suffix(self):
        body = "foo___boxed"
        self.assertEqual(demangle_body(body), ["foo", "_boxed"])

    def test_redArg_suffix(self):
        body = "foo_bar___redArg"
        self.assertEqual(demangle_body(body), ["foo", "bar", "_redArg"])

    def test_component_ending_underscore_disambiguation(self):
        self.assertEqual(demangle_body("a___00b"), ["a_", "b"])
|
||||
|
||||
class TestRoundTrip(unittest.TestCase):
    """Test that mangle(demangle(x)) == x for various names."""

    def _check_roundtrip(self, components):
        # Round-trip without prefix, then again through the standard 'l_'
        # prefix, asserting both recover the original component list.
        mangled = mangle_name(components, prefix="")
        demangled = demangle_body(mangled)
        self.assertEqual(demangled, components,
                         f"Round-trip failed: {components} -> '{mangled}' -> {demangled}")
        mangled_with_prefix = mangle_name(components, prefix="l_")
        self.assertTrue(mangled_with_prefix.startswith("l_"))
        body = mangled_with_prefix[2:]
        demangled2 = demangle_body(body)
        self.assertEqual(demangled2, components)

    def test_simple_names(self):
        self._check_roundtrip(["Lean", "Meta", "main"])
        self._check_roundtrip(["a"])
        self._check_roundtrip(["Foo", "Bar", "baz"])

    def test_numeric(self):
        self._check_roundtrip(["foo", 0, "bar"])
        self._check_roundtrip([42])
        self._check_roundtrip(["a", 1, "b", 2, "c"])

    def test_underscores(self):
        self._check_roundtrip(["_private"])
        self._check_roundtrip(["a_b", "c_d"])
        self._check_roundtrip(["_at_", "_spec"])

    def test_private_name(self):
        self._check_roundtrip(["_private", "Lean", "Meta", "Basic", 0,
                               "Lean", "Meta", "withMVarContextImp"])

    def test_boxed(self):
        self._check_roundtrip(["Lean", "Meta", "foo", "_boxed"])

    def test_redArg(self):
        self._check_roundtrip(["Lean", "Meta", "foo", "_redArg"])

    def test_specialization(self):
        self._check_roundtrip(["List", "map", "_at_", "Foo", "bar", "_spec", 3])

    def test_lambda(self):
        self._check_roundtrip(["Foo", "bar", "_lambda", 0])
        self._check_roundtrip(["Foo", "bar", "_lambda", 2])

    def test_closed(self):
        self._check_roundtrip(["myConst", "_closed", 0])

    def test_special_chars(self):
        # Components requiring _x/_u escapes must survive the trip too.
        self._check_roundtrip(["a.b"])
        self._check_roundtrip(["\u03bb"])
        self._check_roundtrip(["a", "b\u2192c"])

    def test_disambiguation_cases(self):
        self._check_roundtrip(["a", "x27"])
        self._check_roundtrip(["0foo"])
        self._check_roundtrip(["a_", "b"])

    def test_complex_real_names(self):
        """Names modeled after real Lean compiler output."""
        self._check_roundtrip(
            ["Lean", "MVarId", "withContext", "_at_",
             "_private", "Lean", "Meta", "Sym", 0,
             "Lean", "Meta", "Sym", "BackwardRule", "apply",
             "_spec", 2, "_redArg", "_lambda", 0, "_boxed"])
|
||||
|
||||
|
||||
class TestDemangleRaw(unittest.TestCase):
    """Test demangle_lean_name_raw (exact demangling, no postprocessing).

    Covers every recognized symbol shape: l_/lp_ prefixes, _init_,
    initialize_, _lean_main, .cold suffixes, and pass-through of non-Lean
    symbols.
    """

    def test_l_prefix(self):
        self.assertEqual(
            demangle_lean_name_raw("l_Lean_Meta_Sym_main"),
            "Lean.Meta.Sym.main")

    def test_l_prefix_private(self):
        # Raw output keeps the _private.Module.0. machinery visible.
        result = demangle_lean_name_raw(
            "l___private_Lean_Meta_Basic_0__Lean_Meta_withMVarContextImp")
        self.assertEqual(result,
                         "_private.Lean.Meta.Basic.0.Lean.Meta.withMVarContextImp")

    def test_l_prefix_boxed(self):
        result = demangle_lean_name_raw("l_foo___boxed")
        self.assertEqual(result, "foo._boxed")

    def test_l_prefix_redArg(self):
        result = demangle_lean_name_raw(
            "l___private_Lean_Meta_Basic_0__Lean_Meta_withMVarContextImp___redArg")
        self.assertEqual(
            result,
            "_private.Lean.Meta.Basic.0.Lean.Meta.withMVarContextImp._redArg")

    def test_lean_main(self):
        self.assertEqual(demangle_lean_name_raw("_lean_main"), "[lean] main")

    def test_non_lean_names(self):
        # Unrecognized symbols (including runtime lean_apply_N in raw mode)
        # pass through unchanged.
        self.assertEqual(demangle_lean_name_raw("printf"), "printf")
        self.assertEqual(demangle_lean_name_raw("malloc"), "malloc")
        self.assertEqual(demangle_lean_name_raw("lean_apply_5"), "lean_apply_5")
        self.assertEqual(demangle_lean_name_raw(""), "")

    def test_init_prefix(self):
        result = demangle_lean_name_raw("_init_l_Lean_Meta_foo")
        self.assertEqual(result, "[init] Lean.Meta.foo")

    def test_lp_prefix_simple(self):
        # lp_ symbols carry a package name, shown in parentheses.
        mangled = mangle_name(["Lean", "Meta", "foo"], prefix="lp_std_")
        self.assertEqual(mangled, "lp_std_Lean_Meta_foo")
        result = demangle_lean_name_raw(mangled)
        self.assertEqual(result, "Lean.Meta.foo (std)")

    def test_lp_prefix_underscore_pkg(self):
        # The package half is String.mangle output, so '_' doubles there.
        pkg_mangled = mangle_string("my_pkg")
        self.assertEqual(pkg_mangled, "my__pkg")
        mangled = mangle_name(["Lean", "Meta", "foo"],
                              prefix=f"lp_{pkg_mangled}_")
        self.assertEqual(mangled, "lp_my__pkg_Lean_Meta_foo")
        result = demangle_lean_name_raw(mangled)
        self.assertEqual(result, "Lean.Meta.foo (my_pkg)")

    def test_lp_prefix_private_decl(self):
        mangled = mangle_name(
            ["_private", "X", 0, "Y", "foo"], prefix="lp_pkg_")
        self.assertEqual(mangled, "lp_pkg___private_X_0__Y_foo")
        result = demangle_lean_name_raw(mangled)
        self.assertEqual(result, "_private.X.0.Y.foo (pkg)")

    def test_complex_specialization(self):
        components = [
            "Lean", "MVarId", "withContext", "_at_",
            "_private", "Lean", "Meta", "Sym", 0,
            "Lean", "Meta", "Sym", "BackwardRule", "apply",
            "_spec", 2, "_redArg", "_lambda", 0, "_boxed"
        ]
        mangled = mangle_name(components)
        result = demangle_lean_name_raw(mangled)
        expected = format_name(components)
        self.assertEqual(result, expected)

    def test_cold_suffix(self):
        # .cold.N clones keep their suffix, space-separated from the name.
        result = demangle_lean_name_raw("l_Lean_Meta_foo___redArg.cold.1")
        self.assertEqual(result, "Lean.Meta.foo._redArg .cold.1")

    def test_cold_suffix_plain(self):
        result = demangle_lean_name_raw("l_Lean_Meta_foo.cold")
        self.assertEqual(result, "Lean.Meta.foo .cold")

    def test_initialize_no_pkg(self):
        result = demangle_lean_name_raw("initialize_Init_Control_Basic")
        self.assertEqual(result, "[module_init] Init.Control.Basic")

    def test_initialize_with_l_prefix(self):
        result = demangle_lean_name_raw("initialize_l_Lean_Meta_foo")
        self.assertEqual(result, "[module_init] Lean.Meta.foo")

    def test_never_crashes(self):
        """Demangling should never raise, just return the original."""
        weird_inputs = [
            "", "l_", "lp_", "lp_x", "_init_", "initialize_",
            "l_____", "lp____", "l_00", "l_0",
            "some random string", "l_ space",
        ]
        for inp in weird_inputs:
            result = demangle_lean_name_raw(inp)
            self.assertIsInstance(result, str)
|
||||
|
||||
|
||||
class TestPostprocess(unittest.TestCase):
|
||||
"""Test postprocess_name (human-friendly suffix folding, etc.)."""
|
||||
|
||||
def test_no_change(self):
|
||||
self.assertEqual(postprocess_name(["Lean", "Meta", "main"]),
|
||||
"Lean.Meta.main")
|
||||
|
||||
def test_boxed(self):
|
||||
self.assertEqual(postprocess_name(["foo", "_boxed"]),
|
||||
"foo [boxed]")
|
||||
|
||||
def test_redArg(self):
|
||||
self.assertEqual(postprocess_name(["foo", "bar", "_redArg"]),
|
||||
"foo.bar [arity\u2193]")
|
||||
|
||||
def test_lambda_separate(self):
|
||||
# _lam as separate component + numeric index
|
||||
self.assertEqual(postprocess_name(["foo", "_lam", 0]),
|
||||
"foo [\u03bb]")
|
||||
|
||||
def test_lambda_indexed(self):
|
||||
# _lam_0 as single string (appendIndexAfter)
|
||||
self.assertEqual(postprocess_name(["foo", "_lam_0"]),
|
||||
"foo [\u03bb]")
|
||||
self.assertEqual(postprocess_name(["foo", "_lambda_2"]),
|
||||
"foo [\u03bb]")
|
||||
|
||||
def test_lambda_boxed(self):
|
||||
# _lam_0 followed by _boxed
|
||||
self.assertEqual(
|
||||
postprocess_name(["Lean", "Meta", "Simp", "simpLambda",
|
||||
"_lam_0", "_boxed"]),
|
||||
"Lean.Meta.Simp.simpLambda [boxed, \u03bb]")
|
||||
|
||||
def test_closed(self):
|
||||
self.assertEqual(postprocess_name(["myConst", "_closed", 3]),
|
||||
"myConst [closed]")
|
||||
|
||||
def test_closed_indexed(self):
|
||||
self.assertEqual(postprocess_name(["myConst", "_closed_0"]),
|
||||
"myConst [closed]")
|
||||
|
||||
def test_multiple_suffixes(self):
|
||||
self.assertEqual(postprocess_name(["foo", "_redArg", "_boxed"]),
|
||||
"foo [boxed, arity\u2193]")
|
||||
|
||||
def test_redArg_lam(self):
|
||||
# _redArg followed by _lam_0 (issue #4)
|
||||
self.assertEqual(
|
||||
postprocess_name(["Lean", "profileitIOUnsafe",
|
||||
"_redArg", "_lam_0"]),
|
||||
"Lean.profileitIOUnsafe [\u03bb, arity\u2193]")
|
||||
|
||||
def test_private_name(self):
|
||||
self.assertEqual(
|
||||
postprocess_name(["_private", "Lean", "Meta", "Basic", 0,
|
||||
"Lean", "Meta", "withMVarContextImp"]),
|
||||
"Lean.Meta.withMVarContextImp [private]")
|
||||
|
||||
def test_private_with_suffix(self):
|
||||
self.assertEqual(
|
||||
postprocess_name(["_private", "Lean", "Meta", "Basic", 0,
|
||||
"Lean", "Meta", "foo", "_redArg"]),
|
||||
"Lean.Meta.foo [arity\u2193, private]")
|
||||
|
||||
def test_hygienic_strip(self):
|
||||
self.assertEqual(
|
||||
postprocess_name(["Lean", "Meta", "foo", "_@", "Lean", "Meta",
|
||||
"_hyg", 42]),
|
||||
"Lean.Meta.foo")
|
||||
|
||||
def test_specialization(self):
|
||||
self.assertEqual(
|
||||
postprocess_name(["List", "map", "_at_", "Foo", "bar",
|
||||
"_spec", 3]),
|
||||
"List.map spec at Foo.bar")
|
||||
|
||||
def test_specialization_with_suffix(self):
|
||||
# Base suffix _boxed appears in [flags] before spec at
|
||||
self.assertEqual(
|
||||
postprocess_name(["Lean", "MVarId", "withContext", "_at_",
|
||||
"Foo", "bar", "_spec", 2, "_boxed"]),
|
||||
"Lean.MVarId.withContext [boxed] spec at Foo.bar")
|
||||
|
||||
def test_spec_context_with_flags(self):
|
||||
# Compiler suffixes in spec context become context flags
|
||||
self.assertEqual(
|
||||
postprocess_name(["Lean", "Meta", "foo", "_at_",
|
||||
"Lean", "Meta", "bar", "_elam_1", "_redArg",
|
||||
"_spec", 2]),
|
||||
"Lean.Meta.foo spec at Lean.Meta.bar[\u03bb, arity\u2193]")
|
||||
|
||||
def test_spec_context_flags_dedup(self):
|
||||
# Duplicate flag labels are deduplicated
|
||||
self.assertEqual(
|
||||
postprocess_name(["f", "_at_",
|
||||
"g", "_lam_0", "_elam_1", "_redArg",
|
||||
"_spec", 1]),
|
||||
"f spec at g[\u03bb, arity\u2193]")
|
||||
|
||||
def test_multiple_at(self):
|
||||
# Multiple _at_ entries become separate spec at clauses
|
||||
self.assertEqual(
|
||||
postprocess_name(["f", "_at_", "g", "_spec", 1,
|
||||
"_at_", "h", "_spec", 2]),
|
||||
"f spec at g spec at h")
|
||||
|
||||
def test_multiple_at_with_flags(self):
|
||||
# Multiple spec at with flags on base and contexts
|
||||
self.assertEqual(
|
||||
postprocess_name(["f", "_at_", "g", "_redArg", "_spec", 1,
|
||||
"_at_", "h", "_lam_0", "_spec", 2,
|
||||
"_boxed"]),
|
||||
"f [boxed] spec at g[arity\u2193] spec at h[\u03bb]")
|
||||
|
||||
def test_base_flags_before_spec(self):
|
||||
# Base trailing suffixes appear in [flags] before spec at
|
||||
self.assertEqual(
|
||||
postprocess_name(["f", "_at_", "g", "_spec", 1, "_lam_0"]),
|
||||
"f [\u03bb] spec at g")
|
||||
|
||||
def test_spec_context_strip_spec_suffixes(self):
|
||||
# spec_0 in context should be stripped
|
||||
self.assertEqual(
|
||||
postprocess_name(["Lean", "Meta", "transformWithCache", "visit",
|
||||
"_at_",
|
||||
"_private", "Lean", "Meta", "Transform", 0,
|
||||
"Lean", "Meta", "transform",
|
||||
"Lean", "Meta", "Sym", "unfoldReducible",
|
||||
"spec_0", "spec_0",
|
||||
"_spec", 1]),
|
||||
"Lean.Meta.transformWithCache.visit "
|
||||
"spec at Lean.Meta.transform.Lean.Meta.Sym.unfoldReducible")
|
||||
|
||||
def test_spec_context_strip_private(self):
|
||||
# _private in spec context should be stripped
|
||||
self.assertEqual(
|
||||
postprocess_name(["Array", "mapMUnsafe", "map", "_at_",
|
||||
"_private", "Lean", "Meta", "Transform", 0,
|
||||
"Lean", "Meta", "transformWithCache", "visit",
|
||||
"_spec", 1]),
|
||||
"Array.mapMUnsafe.map "
|
||||
"spec at Lean.Meta.transformWithCache.visit")
|
||||
|
||||
def test_empty(self):
|
||||
self.assertEqual(postprocess_name([]), "")
|
||||
|
||||
|
||||
class TestDemangleHumanFriendly(unittest.TestCase):
|
||||
"""Test demangle_lean_name (human-friendly output)."""
|
||||
|
||||
def test_simple(self):
|
||||
self.assertEqual(demangle_lean_name("l_Lean_Meta_main"),
|
||||
"Lean.Meta.main")
|
||||
|
||||
def test_boxed(self):
|
||||
self.assertEqual(demangle_lean_name("l_foo___boxed"),
|
||||
"foo [boxed]")
|
||||
|
||||
def test_redArg(self):
|
||||
self.assertEqual(demangle_lean_name("l_foo___redArg"),
|
||||
"foo [arity\u2193]")
|
||||
|
||||
def test_private(self):
|
||||
self.assertEqual(
|
||||
demangle_lean_name(
|
||||
"l___private_Lean_Meta_Basic_0__Lean_Meta_foo"),
|
||||
"Lean.Meta.foo [private]")
|
||||
|
||||
def test_private_with_redArg(self):
|
||||
self.assertEqual(
|
||||
demangle_lean_name(
|
||||
"l___private_Lean_Meta_Basic_0__Lean_Meta_foo___redArg"),
|
||||
"Lean.Meta.foo [arity\u2193, private]")
|
||||
|
||||
def test_cold_with_suffix(self):
|
||||
self.assertEqual(
|
||||
demangle_lean_name("l_Lean_Meta_foo___redArg.cold.1"),
|
||||
"Lean.Meta.foo [arity\u2193] .cold.1")
|
||||
|
||||
def test_lean_apply(self):
|
||||
self.assertEqual(demangle_lean_name("lean_apply_5"), "<apply/5>")
|
||||
self.assertEqual(demangle_lean_name("lean_apply_12"), "<apply/12>")
|
||||
|
||||
def test_lean_apply_raw_unchanged(self):
|
||||
self.assertEqual(demangle_lean_name_raw("lean_apply_5"),
|
||||
"lean_apply_5")
|
||||
|
||||
def test_init_private(self):
|
||||
self.assertEqual(
|
||||
demangle_lean_name(
|
||||
"_init_l___private_X_0__Y_foo"),
|
||||
"[init] Y.foo [private]")
|
||||
|
||||
def test_complex_specialization(self):
|
||||
components = [
|
||||
"Lean", "MVarId", "withContext", "_at_",
|
||||
"_private", "Lean", "Meta", "Sym", 0,
|
||||
"Lean", "Meta", "Sym", "BackwardRule", "apply",
|
||||
"_spec", 2, "_redArg", "_lambda", 0, "_boxed"
|
||||
]
|
||||
mangled = mangle_name(components)
|
||||
result = demangle_lean_name(mangled)
|
||||
# Base: Lean.MVarId.withContext with trailing _redArg, _lambda 0, _boxed
|
||||
# Spec context: Lean.Meta.Sym.BackwardRule.apply (private stripped)
|
||||
self.assertEqual(
|
||||
result,
|
||||
"Lean.MVarId.withContext [boxed, \u03bb, arity\u2193] "
|
||||
"spec at Lean.Meta.Sym.BackwardRule.apply")
|
||||
|
||||
def test_non_lean_unchanged(self):
|
||||
self.assertEqual(demangle_lean_name("printf"), "printf")
|
||||
self.assertEqual(demangle_lean_name("malloc"), "malloc")
|
||||
self.assertEqual(demangle_lean_name(""), "")
|
||||
|
||||
|
||||
class TestDemangleProfile(unittest.TestCase):
|
||||
"""Test the profile rewriter."""
|
||||
|
||||
def _make_profile_shared(self, strings):
|
||||
"""Create a profile with shared.stringArray (newer format)."""
|
||||
return {
|
||||
"meta": {"version": 28},
|
||||
"libs": [],
|
||||
"shared": {
|
||||
"stringArray": list(strings),
|
||||
},
|
||||
"threads": [{
|
||||
"name": "main",
|
||||
"pid": "1",
|
||||
"tid": 1,
|
||||
"funcTable": {
|
||||
"name": list(range(len(strings))),
|
||||
"isJS": [False] * len(strings),
|
||||
"relevantForJS": [False] * len(strings),
|
||||
"resource": [-1] * len(strings),
|
||||
"fileName": [None] * len(strings),
|
||||
"lineNumber": [None] * len(strings),
|
||||
"columnNumber": [None] * len(strings),
|
||||
"length": len(strings),
|
||||
},
|
||||
"frameTable": {"length": 0},
|
||||
"stackTable": {"length": 0},
|
||||
"samples": {"length": 0},
|
||||
"markers": {"length": 0},
|
||||
"resourceTable": {"length": 0},
|
||||
"nativeSymbols": {"length": 0},
|
||||
}],
|
||||
"pages": [],
|
||||
"counters": [],
|
||||
}
|
||||
|
||||
def _make_profile_per_thread(self, strings):
|
||||
"""Create a profile with per-thread stringArray (samply format)."""
|
||||
return {
|
||||
"meta": {"version": 28},
|
||||
"libs": [],
|
||||
"threads": [{
|
||||
"name": "main",
|
||||
"pid": "1",
|
||||
"tid": 1,
|
||||
"stringArray": list(strings),
|
||||
"funcTable": {
|
||||
"name": list(range(len(strings))),
|
||||
"isJS": [False] * len(strings),
|
||||
"relevantForJS": [False] * len(strings),
|
||||
"resource": [-1] * len(strings),
|
||||
"fileName": [None] * len(strings),
|
||||
"lineNumber": [None] * len(strings),
|
||||
"columnNumber": [None] * len(strings),
|
||||
"length": len(strings),
|
||||
},
|
||||
"frameTable": {"length": 0},
|
||||
"stackTable": {"length": 0},
|
||||
"samples": {"length": 0},
|
||||
"markers": {"length": 0},
|
||||
"resourceTable": {"length": 0},
|
||||
"nativeSymbols": {"length": 0},
|
||||
}],
|
||||
"pages": [],
|
||||
"counters": [],
|
||||
}
|
||||
|
||||
def test_profile_rewrite_shared(self):
|
||||
from lean_demangle_profile import rewrite_profile
|
||||
strings = [
|
||||
"l_Lean_Meta_Sym_main",
|
||||
"printf",
|
||||
"lean_apply_5",
|
||||
"l___private_Lean_Meta_Basic_0__Lean_Meta_foo",
|
||||
]
|
||||
profile = self._make_profile_shared(strings)
|
||||
rewrite_profile(profile)
|
||||
sa = profile["shared"]["stringArray"]
|
||||
self.assertEqual(sa[0], "Lean.Meta.Sym.main")
|
||||
self.assertEqual(sa[1], "printf")
|
||||
self.assertEqual(sa[2], "<apply/5>")
|
||||
self.assertEqual(sa[3], "Lean.Meta.foo [private]")
|
||||
|
||||
def test_profile_rewrite_per_thread(self):
|
||||
from lean_demangle_profile import rewrite_profile
|
||||
strings = [
|
||||
"l_Lean_Meta_Sym_main",
|
||||
"printf",
|
||||
"lean_apply_5",
|
||||
"l___private_Lean_Meta_Basic_0__Lean_Meta_foo",
|
||||
]
|
||||
profile = self._make_profile_per_thread(strings)
|
||||
count = rewrite_profile(profile)
|
||||
sa = profile["threads"][0]["stringArray"]
|
||||
self.assertEqual(sa[0], "Lean.Meta.Sym.main")
|
||||
self.assertEqual(sa[1], "printf")
|
||||
self.assertEqual(sa[2], "<apply/5>")
|
||||
self.assertEqual(sa[3], "Lean.Meta.foo [private]")
|
||||
self.assertEqual(count, 3)
|
||||
|
||||
def test_profile_json_roundtrip(self):
|
||||
from lean_demangle_profile import process_profile_file
|
||||
strings = ["l_Lean_Meta_main", "malloc"]
|
||||
profile = self._make_profile_shared(strings)
|
||||
|
||||
with tempfile.NamedTemporaryFile(mode='w', suffix='.json',
|
||||
delete=False) as f:
|
||||
json.dump(profile, f)
|
||||
inpath = f.name
|
||||
|
||||
outpath = inpath.replace('.json', '-demangled.json')
|
||||
try:
|
||||
process_profile_file(inpath, outpath)
|
||||
with open(outpath) as f:
|
||||
result = json.load(f)
|
||||
self.assertEqual(result["shared"]["stringArray"][0],
|
||||
"Lean.Meta.main")
|
||||
self.assertEqual(result["shared"]["stringArray"][1], "malloc")
|
||||
finally:
|
||||
os.unlink(inpath)
|
||||
if os.path.exists(outpath):
|
||||
os.unlink(outpath)
|
||||
|
||||
def test_profile_gzip_roundtrip(self):
|
||||
from lean_demangle_profile import process_profile_file
|
||||
strings = ["l_Lean_Meta_main", "malloc"]
|
||||
profile = self._make_profile_shared(strings)
|
||||
|
||||
with tempfile.NamedTemporaryFile(suffix='.json.gz',
|
||||
delete=False) as f:
|
||||
with gzip.open(f, 'wt') as gz:
|
||||
json.dump(profile, gz)
|
||||
inpath = f.name
|
||||
|
||||
outpath = inpath.replace('.json.gz', '-demangled.json.gz')
|
||||
try:
|
||||
process_profile_file(inpath, outpath)
|
||||
with gzip.open(outpath, 'rt') as f:
|
||||
result = json.load(f)
|
||||
self.assertEqual(result["shared"]["stringArray"][0],
|
||||
"Lean.Meta.main")
|
||||
finally:
|
||||
os.unlink(inpath)
|
||||
if os.path.exists(outpath):
|
||||
os.unlink(outpath)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
@@ -11,7 +11,7 @@ IMPORTANT: Keep this documentation up-to-date when modifying the script's behavi
|
||||
What this script does:
|
||||
1. Validates preliminary Lean4 release infrastructure:
|
||||
- Checks that the release branch (releases/vX.Y.0) exists
|
||||
- Verifies CMake version settings are correct
|
||||
- Verifies CMake version settings are correct (both src/ and stage0/)
|
||||
- Confirms the release tag exists
|
||||
- Validates the release page exists on GitHub (created automatically by CI after tag push)
|
||||
- Checks the release notes page on lean-lang.org (updated while bumping the `reference-manual` repository)
|
||||
@@ -326,6 +326,42 @@ def check_cmake_version(repo_url, branch, version_major, version_minor, github_t
|
||||
print(f" ✅ CMake version settings are correct in {cmake_file_path}")
|
||||
return True
|
||||
|
||||
def check_stage0_version(repo_url, branch, version_major, version_minor, github_token):
|
||||
"""Verify that stage0/src/CMakeLists.txt has the same version as src/CMakeLists.txt.
|
||||
|
||||
The stage0 pre-built binaries stamp .olean headers with their baked-in version.
|
||||
If stage0 has a different version (e.g. from a 'begin development cycle' bump),
|
||||
the release tarball will contain .olean files with the wrong version.
|
||||
"""
|
||||
stage0_cmake = "stage0/src/CMakeLists.txt"
|
||||
content = get_branch_content(repo_url, branch, stage0_cmake, github_token)
|
||||
if content is None:
|
||||
print(f" ❌ Could not retrieve {stage0_cmake} from {branch}")
|
||||
return False
|
||||
|
||||
errors = []
|
||||
for line in content.splitlines():
|
||||
stripped = line.strip()
|
||||
if stripped.startswith("set(LEAN_VERSION_MAJOR "):
|
||||
actual = stripped.split()[-1].rstrip(")")
|
||||
if actual != str(version_major):
|
||||
errors.append(f"LEAN_VERSION_MAJOR: expected {version_major}, found {actual}")
|
||||
elif stripped.startswith("set(LEAN_VERSION_MINOR "):
|
||||
actual = stripped.split()[-1].rstrip(")")
|
||||
if actual != str(version_minor):
|
||||
errors.append(f"LEAN_VERSION_MINOR: expected {version_minor}, found {actual}")
|
||||
|
||||
if errors:
|
||||
print(f" ❌ stage0 version mismatch in {stage0_cmake}:")
|
||||
for error in errors:
|
||||
print(f" {error}")
|
||||
print(f" The stage0 compiler stamps .olean headers with its baked-in version.")
|
||||
print(f" Run `make update-stage0` to rebuild stage0 with the correct version.")
|
||||
return False
|
||||
|
||||
print(f" ✅ stage0 version matches in {stage0_cmake}")
|
||||
return True
|
||||
|
||||
def extract_org_repo_from_url(repo_url):
|
||||
"""Extract the 'org/repo' part from a GitHub URL."""
|
||||
if repo_url.startswith("https://github.com/"):
|
||||
@@ -441,7 +477,10 @@ def get_pr_ci_status(repo_url, pr_number, github_token):
|
||||
conclusions = [run['conclusion'] for run in check_runs if run.get('status') == 'completed']
|
||||
in_progress = [run for run in check_runs if run.get('status') in ['queued', 'in_progress']]
|
||||
|
||||
failed = sum(1 for c in conclusions if c in ['failure', 'timed_out', 'action_required'])
|
||||
if in_progress:
|
||||
if failed > 0:
|
||||
return "failure", f"{failed} check(s) failing, {len(in_progress)} still in progress"
|
||||
return "pending", f"{len(in_progress)} check(s) in progress"
|
||||
|
||||
if not conclusions:
|
||||
@@ -450,7 +489,6 @@ def get_pr_ci_status(repo_url, pr_number, github_token):
|
||||
if all(c == 'success' for c in conclusions):
|
||||
return "success", f"All {len(conclusions)} checks passed"
|
||||
|
||||
failed = sum(1 for c in conclusions if c in ['failure', 'timed_out', 'action_required'])
|
||||
if failed > 0:
|
||||
return "failure", f"{failed} check(s) failed"
|
||||
|
||||
@@ -680,6 +718,9 @@ def main():
|
||||
# Check CMake version settings
|
||||
if not check_cmake_version(lean_repo_url, branch_name, version_major, version_minor, github_token):
|
||||
lean4_success = False
|
||||
# Check that stage0 version matches (stage0 stamps .olean headers with its version)
|
||||
if not check_stage0_version(lean_repo_url, branch_name, version_major, version_minor, github_token):
|
||||
lean4_success = False
|
||||
|
||||
# Check for tag and release page
|
||||
if not tag_exists(lean_repo_url, toolchain, github_token):
|
||||
@@ -965,14 +1006,15 @@ def main():
|
||||
# Find the actual minor version in CMakeLists.txt
|
||||
for line in cmake_lines:
|
||||
if line.strip().startswith("set(LEAN_VERSION_MINOR "):
|
||||
actual_minor = int(line.split()[-1].rstrip(")"))
|
||||
m = re.search(r'set\(LEAN_VERSION_MINOR\s+(\d+)', line)
|
||||
actual_minor = int(m.group(1)) if m else 0
|
||||
version_minor_correct = actual_minor >= next_minor
|
||||
break
|
||||
else:
|
||||
version_minor_correct = False
|
||||
|
||||
is_release_correct = any(
|
||||
l.strip().startswith("set(LEAN_VERSION_IS_RELEASE 0)")
|
||||
re.match(r'set\(LEAN_VERSION_IS_RELEASE\s+0[\s)]', l.strip())
|
||||
for l in cmake_lines
|
||||
)
|
||||
|
||||
|
||||
@@ -479,6 +479,25 @@ def execute_release_steps(repo, version, config):
|
||||
print(blue("Updating lakefile.toml..."))
|
||||
run_command(f'perl -pi -e \'s/"v4\\.[0-9]+(\\.[0-9]+)?(-rc[0-9]+)?"/"' + version + '"/g\' lakefile.*', cwd=repo_path)
|
||||
run_command("lake update", cwd=repo_path, stream_output=True)
|
||||
elif repo_name == "verso":
|
||||
# verso has nested Lake projects in test-projects/ that each have their own
|
||||
# lake-manifest.json with a subverso pin. After updating the root manifest via
|
||||
# `lake update`, sync the de-modulized subverso rev into all sub-manifests.
|
||||
# The sub-projects use an old toolchain (v4.21.0) that doesn't support module/prelude
|
||||
# syntax, so they need the de-modulized version (tagged no-modules/<root-rev>).
|
||||
# The "SubVerso version consistency" CI check accepts either the root or de-modulized rev.
|
||||
run_command("lake update", cwd=repo_path, stream_output=True)
|
||||
print(blue("Syncing de-modulized subverso rev to test-project sub-manifests..."))
|
||||
sync_script = (
|
||||
'ROOT_REV=$(jq -r \'.packages[] | select(.name == "subverso") | .rev\' lake-manifest.json); '
|
||||
'SUBVERSO_URL=$(jq -r \'.packages[] | select(.name == "subverso") | .url\' lake-manifest.json); '
|
||||
'DEMOD_REV=$(git ls-remote "$SUBVERSO_URL" "refs/tags/no-modules/$ROOT_REV" | awk \'{print $1}\'); '
|
||||
'find test-projects -name lake-manifest.json -print0 | while IFS= read -r -d \'\' f; do '
|
||||
'jq --arg rev "$DEMOD_REV" \'.packages |= map(if .name == "subverso" then .rev = $rev else . end)\' "$f" > /tmp/lm_tmp.json && mv /tmp/lm_tmp.json "$f"; '
|
||||
'done'
|
||||
)
|
||||
run_command(sync_script, cwd=repo_path)
|
||||
print(green("Synced de-modulized subverso rev to all test-project sub-manifests"))
|
||||
elif dependencies:
|
||||
run_command(f'perl -pi -e \'s/"v4\\.[0-9]+(\\.[0-9]+)?(-rc[0-9]+)?"/"' + version + '"/g\' lakefile.*', cwd=repo_path)
|
||||
run_command("lake update", cwd=repo_path, stream_output=True)
|
||||
|
||||
@@ -7,11 +7,17 @@ if(NOT DEFINED STAGE)
|
||||
endif()
|
||||
include(ExternalProject)
|
||||
project(LEAN CXX C)
|
||||
set(LEAN_VERSION_MAJOR 4)
|
||||
set(LEAN_VERSION_MINOR 30)
|
||||
set(LEAN_VERSION_PATCH 0)
|
||||
set(LEAN_VERSION_IS_RELEASE 0) # This number is 1 in the release revision, and 0 otherwise.
|
||||
set(LEAN_VERSION_MAJOR 4 CACHE STRING "")
|
||||
set(LEAN_VERSION_MINOR 30 CACHE STRING "")
|
||||
set(LEAN_VERSION_PATCH 0 CACHE STRING "")
|
||||
set(LEAN_VERSION_IS_RELEASE 0 CACHE STRING "") # This number is 1 in the release revision, and 0 otherwise.
|
||||
set(LEAN_SPECIAL_VERSION_DESC "" CACHE STRING "Additional version description like 'nightly-2018-03-11'")
|
||||
# project(LEAN) above implicitly creates empty LEAN_VERSION_{MAJOR,MINOR,PATCH}
|
||||
# normal variables (CMake sets <PROJECT>_VERSION_* for the project name). These
|
||||
# shadow the cache values. Remove them so ${VAR} falls through to the cache.
|
||||
unset(LEAN_VERSION_MAJOR)
|
||||
unset(LEAN_VERSION_MINOR)
|
||||
unset(LEAN_VERSION_PATCH)
|
||||
set(LEAN_VERSION_STRING "${LEAN_VERSION_MAJOR}.${LEAN_VERSION_MINOR}.${LEAN_VERSION_PATCH}")
|
||||
if(LEAN_SPECIAL_VERSION_DESC)
|
||||
string(APPEND LEAN_VERSION_STRING "-${LEAN_SPECIAL_VERSION_DESC}")
|
||||
@@ -81,6 +87,8 @@ option(USE_GITHASH "GIT_HASH" ON)
|
||||
option(INSTALL_LICENSE "INSTALL_LICENSE" ON)
|
||||
# When ON we install a copy of cadical
|
||||
option(INSTALL_CADICAL "Install a copy of cadical" ON)
|
||||
# When ON we install a copy of leantar
|
||||
option(INSTALL_LEANTAR "Install a copy of leantar" ON)
|
||||
|
||||
# FLAGS for disabling optimizations and debugging
|
||||
option(FREE_VAR_RANGE_OPT "FREE_VAR_RANGE_OPT" ON)
|
||||
@@ -751,6 +759,14 @@ if(STAGE GREATER 0 AND CADICAL AND INSTALL_CADICAL)
|
||||
add_dependencies(leancpp copy-cadical)
|
||||
endif()
|
||||
|
||||
if(STAGE GREATER 0 AND LEANTAR AND INSTALL_LEANTAR)
|
||||
add_custom_target(
|
||||
copy-leantar
|
||||
COMMAND cmake -E copy_if_different "${LEANTAR}" "${CMAKE_BINARY_DIR}/bin/leantar${CMAKE_EXECUTABLE_SUFFIX}"
|
||||
)
|
||||
add_dependencies(leancpp copy-leantar)
|
||||
endif()
|
||||
|
||||
# MSYS2 bash usually handles Windows paths relatively well, but not when putting them in the PATH
|
||||
string(REGEX REPLACE "^([a-zA-Z]):" "/\\1" LEAN_BIN "${CMAKE_BINARY_DIR}/bin")
|
||||
|
||||
@@ -907,6 +923,10 @@ if(STAGE GREATER 0 AND CADICAL AND INSTALL_CADICAL)
|
||||
install(PROGRAMS "${CADICAL}" DESTINATION bin)
|
||||
endif()
|
||||
|
||||
if(STAGE GREATER 0 AND LEANTAR AND INSTALL_LEANTAR)
|
||||
install(PROGRAMS "${LEANTAR}" DESTINATION bin)
|
||||
endif()
|
||||
|
||||
add_custom_target(
|
||||
clean-stdlib
|
||||
COMMAND rm -rf "${CMAKE_BINARY_DIR}/lib" || true
|
||||
|
||||
@@ -30,6 +30,7 @@ public import Init.Hints
|
||||
public import Init.Conv
|
||||
public import Init.Guard
|
||||
public import Init.Simproc
|
||||
public import Init.CbvSimproc
|
||||
public import Init.SizeOfLemmas
|
||||
public import Init.BinderPredicates
|
||||
public import Init.Ext
|
||||
|
||||
71
src/Init/CbvSimproc.lean
Normal file
71
src/Init/CbvSimproc.lean
Normal file
@@ -0,0 +1,71 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO, LLC. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Wojciech Różowski
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public meta import Init.Data.ToString.Name -- shake: keep (transitive public meta dep, fix)
|
||||
public import Init.Tactics
|
||||
import Init.Meta.Defs
|
||||
|
||||
public section
|
||||
|
||||
namespace Lean.Parser
|
||||
|
||||
syntax cbvSimprocEval := "cbv_eval"
|
||||
|
||||
/--
|
||||
A user-defined simplification procedure used by the `cbv` tactic.
|
||||
The body must have type `Lean.Meta.Sym.Simp.Simproc` (`Expr → SimpM Result`).
|
||||
Procedures are indexed by a discrimination tree pattern and fire at one of three phases:
|
||||
`↓` (pre), `cbv_eval` (eval), or `↑` (post, default).
|
||||
-/
|
||||
syntax (docComment)? attrKind "cbv_simproc " (Tactic.simpPre <|> Tactic.simpPost <|> cbvSimprocEval)? ident " (" term ")" " := " term : command
|
||||
|
||||
/--
|
||||
A `cbv_simproc` declaration without automatically adding it to the cbv simproc set.
|
||||
To activate, use `attribute [cbv_simproc]`.
|
||||
-/
|
||||
syntax (docComment)? "cbv_simproc_decl " ident " (" term ")" " := " term : command
|
||||
|
||||
syntax (docComment)? attrKind "builtin_cbv_simproc " (Tactic.simpPre <|> Tactic.simpPost <|> cbvSimprocEval)? ident " (" term ")" " := " term : command
|
||||
|
||||
syntax (docComment)? "builtin_cbv_simproc_decl " ident " (" term ")" " := " term : command
|
||||
|
||||
syntax (name := cbvSimprocPattern) "cbv_simproc_pattern% " term " => " ident : command
|
||||
|
||||
syntax (name := cbvSimprocPatternBuiltin) "builtin_cbv_simproc_pattern% " term " => " ident : command
|
||||
|
||||
namespace Attr
|
||||
|
||||
syntax (name := cbvSimprocAttr) "cbv_simproc" (Tactic.simpPre <|> Tactic.simpPost <|> cbvSimprocEval)? : attr
|
||||
|
||||
syntax (name := cbvSimprocBuiltinAttr) "builtin_cbv_simproc" (Tactic.simpPre <|> Tactic.simpPost <|> cbvSimprocEval)? : attr
|
||||
|
||||
end Attr
|
||||
|
||||
macro_rules
|
||||
| `($[$doc?:docComment]? cbv_simproc_decl $n:ident ($pattern:term) := $body) => do
|
||||
let simprocType := `Lean.Meta.Sym.Simp.Simproc
|
||||
`($[$doc?:docComment]? meta def $n:ident : $(mkIdent simprocType) := $body
|
||||
cbv_simproc_pattern% $pattern => $n)
|
||||
|
||||
macro_rules
|
||||
| `($[$doc?:docComment]? builtin_cbv_simproc_decl $n:ident ($pattern:term) := $body) => do
|
||||
let simprocType := `Lean.Meta.Sym.Simp.Simproc
|
||||
`($[$doc?:docComment]? def $n:ident : $(mkIdent simprocType) := $body
|
||||
builtin_cbv_simproc_pattern% $pattern => $n)
|
||||
|
||||
macro_rules
|
||||
| `($[$doc?:docComment]? $kind:attrKind cbv_simproc $[$phase?]? $n:ident ($pattern:term) := $body) => do
|
||||
`($[$doc?:docComment]? cbv_simproc_decl $n ($pattern) := $body
|
||||
attribute [$kind cbv_simproc $[$phase?]?] $n)
|
||||
|
||||
macro_rules
|
||||
| `($[$doc?:docComment]? $kind:attrKind builtin_cbv_simproc $[$phase?]? $n:ident ($pattern:term) := $body) => do
|
||||
`($[$doc?:docComment]? builtin_cbv_simproc_decl $n ($pattern) := $body
|
||||
attribute [$kind builtin_cbv_simproc $[$phase?]?] $n)
|
||||
|
||||
end Lean.Parser
|
||||
@@ -69,9 +69,11 @@ theorem em (p : Prop) : p ∨ ¬p :=
|
||||
theorem exists_true_of_nonempty {α : Sort u} : Nonempty α → ∃ _ : α, True
|
||||
| ⟨x⟩ => ⟨x, trivial⟩
|
||||
|
||||
@[implicit_reducible]
|
||||
noncomputable def inhabited_of_nonempty {α : Sort u} (h : Nonempty α) : Inhabited α :=
|
||||
⟨choice h⟩
|
||||
|
||||
@[implicit_reducible]
|
||||
noncomputable def inhabited_of_exists {α : Sort u} {p : α → Prop} (h : ∃ x, p x) : Inhabited α :=
|
||||
inhabited_of_nonempty (Exists.elim h (fun w _ => ⟨w⟩))
|
||||
|
||||
@@ -81,6 +83,7 @@ noncomputable scoped instance (priority := low) propDecidable (a : Prop) : Decid
|
||||
| Or.inl h => ⟨isTrue h⟩
|
||||
| Or.inr h => ⟨isFalse h⟩
|
||||
|
||||
@[implicit_reducible]
|
||||
noncomputable def decidableInhabited (a : Prop) : Inhabited (Decidable a) where
|
||||
default := inferInstance
|
||||
|
||||
|
||||
@@ -18,3 +18,4 @@ public import Init.Control.StateCps
|
||||
public import Init.Control.ExceptCps
|
||||
public import Init.Control.MonadAttach
|
||||
public import Init.Control.EState
|
||||
public import Init.Control.Do
|
||||
|
||||
@@ -49,6 +49,7 @@ instance : Monad Id where
|
||||
/--
|
||||
The identity monad has a `bind` operator.
|
||||
-/
|
||||
@[implicit_reducible]
|
||||
def hasBind : Bind Id :=
|
||||
inferInstance
|
||||
|
||||
@@ -58,7 +59,7 @@ Runs a computation in the identity monad.
|
||||
This function is the identity function. Because its parameter has type `Id α`, it causes
|
||||
`do`-notation in its arguments to use the `Monad Id` instance.
|
||||
-/
|
||||
@[always_inline, inline, expose]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
protected def run (x : Id α) : α := x
|
||||
|
||||
instance [OfNat α n] : OfNat (Id α) n :=
|
||||
|
||||
@@ -254,8 +254,8 @@ instance : LawfulMonad Id := by
|
||||
@[simp, grind =] theorem run_bind (x : Id α) (f : α → Id β) : (x >>= f).run = (f x.run).run := rfl
|
||||
@[simp, grind =] theorem run_pure (a : α) : (pure a : Id α).run = a := rfl
|
||||
@[simp, grind =] theorem pure_run (a : Id α) : pure a.run = a := rfl
|
||||
@[simp] theorem run_seqRight (x y : Id α) : (x *> y).run = y.run := rfl
|
||||
@[simp] theorem run_seqLeft (x y : Id α) : (x <* y).run = x.run := rfl
|
||||
@[simp] theorem run_seqRight (x : Id α) (y : Id β) : (x *> y).run = y.run := rfl
|
||||
@[simp] theorem run_seqLeft (x : Id α) (y : Id β) : (x <* y).run = x.run := rfl
|
||||
@[simp] theorem run_seq (f : Id (α → β)) (x : Id α) : (f <*> x).run = f.run x.run := rfl
|
||||
|
||||
end Id
|
||||
|
||||
@@ -280,7 +280,7 @@ resulting in `t'`, which becomes the new target subgoal. -/
|
||||
syntax (name := convConvSeq) "conv" " => " convSeq : conv
|
||||
|
||||
/-- `· conv` focuses on the main conv goal and tries to solve it using `s`. -/
|
||||
macro dot:patternIgnore("· " <|> ". ") s:convSeq : conv => `(conv| {%$dot ($s) })
|
||||
macro dot:unicode("· ", ". ") s:convSeq : conv => `(conv| {%$dot ($s) })
|
||||
|
||||
|
||||
/-- `fail_if_success t` fails if the tactic `t` succeeds. -/
|
||||
|
||||
@@ -34,3 +34,4 @@ public import Init.Data.Array.MinMax
|
||||
public import Init.Data.Array.Nat
|
||||
public import Init.Data.Array.Int
|
||||
public import Init.Data.Array.Count
|
||||
public import Init.Data.Array.Sort
|
||||
|
||||
@@ -148,6 +148,9 @@ end List
|
||||
|
||||
namespace Array
|
||||
|
||||
@[simp, grind =] theorem getElem!_toList [Inhabited α] {xs : Array α} {i : Nat} : xs.toList[i]! = xs[i]! := by
|
||||
rw [List.getElem!_toArray]
|
||||
|
||||
theorem size_eq_length_toList {xs : Array α} : xs.size = xs.toList.length := rfl
|
||||
|
||||
/-! ### Externs -/
|
||||
@@ -283,7 +286,7 @@ Examples:
|
||||
* `#[1, 2].isEmpty = false`
|
||||
* `#[()].isEmpty = false`
|
||||
-/
|
||||
@[expose]
|
||||
@[expose, inline]
|
||||
def isEmpty (xs : Array α) : Bool :=
|
||||
xs.size = 0
|
||||
|
||||
@@ -377,6 +380,7 @@ Returns the last element of an array, or panics if the array is empty.
|
||||
Safer alternatives include `Array.back`, which requires a proof the array is non-empty, and
|
||||
`Array.back?`, which returns an `Option`.
|
||||
-/
|
||||
@[inline]
|
||||
def back! [Inhabited α] (xs : Array α) : α :=
|
||||
xs[xs.size - 1]!
|
||||
|
||||
@@ -386,6 +390,7 @@ Returns the last element of an array, given a proof that the array is not empty.
|
||||
See `Array.back!` for the version that panics if the array is empty, or `Array.back?` for the
|
||||
version that returns an option.
|
||||
-/
|
||||
@[inline]
|
||||
def back (xs : Array α) (h : 0 < xs.size := by get_elem_tactic) : α :=
|
||||
xs[xs.size - 1]'(Nat.sub_one_lt_of_lt h)
|
||||
|
||||
@@ -395,6 +400,7 @@ Returns the last element of an array, or `none` if the array is empty.
|
||||
See `Array.back!` for the version that panics if the array is empty, or `Array.back` for the version
|
||||
that requires a proof the array is non-empty.
|
||||
-/
|
||||
@[inline]
|
||||
def back? (xs : Array α) : Option α :=
|
||||
xs[xs.size - 1]?
|
||||
|
||||
@@ -2145,7 +2151,4 @@ protected def repr {α : Type u} [Repr α] (xs : Array α) : Std.Format :=
|
||||
instance {α : Type u} [Repr α] : Repr (Array α) where
|
||||
reprPrec xs _ := Array.repr xs
|
||||
|
||||
instance [ToString α] : ToString (Array α) where
|
||||
toString xs := String.Internal.append "#" (toString xs.toList)
|
||||
|
||||
end Array
|
||||
|
||||
@@ -78,7 +78,7 @@ private theorem cons_lex_cons [BEq α] {lt : α → α → Bool} {a b : α} {xs
|
||||
simp only [lex, size_append, List.size_toArray, List.length_cons, List.length_nil, Nat.zero_add,
|
||||
Nat.add_min_add_left, Nat.add_lt_add_iff_left, Std.Rco.forIn'_eq_forIn'_toList]
|
||||
rw [cons_lex_cons.forIn'_congr_aux (Nat.toList_rco_eq_cons (by omega)) rfl (fun _ _ _ => rfl)]
|
||||
simp only [bind_pure_comp, map_pure, Nat.toList_rco_succ_succ, Nat.add_comm 1]
|
||||
simp only [Nat.toList_rco_succ_succ, Nat.add_comm 1]
|
||||
cases h : lt a b
|
||||
· cases h' : a == b <;> simp [bne, *]
|
||||
· simp [*]
|
||||
|
||||
10
src/Init/Data/Array/Sort.lean
Normal file
10
src/Init/Data/Array/Sort.lean
Normal file
@@ -0,0 +1,10 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Paul Reichert
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Array.Sort.Basic
|
||||
public import Init.Data.Array.Sort.Lemmas
|
||||
55
src/Init/Data/Array/Sort/Basic.lean
Normal file
55
src/Init/Data/Array/Sort/Basic.lean
Normal file
@@ -0,0 +1,55 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Paul Reichert
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Array.Subarray.Split
|
||||
public import Init.Data.Slice.Array
|
||||
import Init.Omega
|
||||
|
||||
public section
|
||||
|
||||
private def Array.MergeSort.Internal.merge (xs ys : Array α) (le : α → α → Bool := by exact (· ≤ ·)) :
|
||||
Array α :=
|
||||
if hxs : 0 < xs.size then
|
||||
if hys : 0 < ys.size then
|
||||
go xs[*...*] ys[*...*] (by simp only [Array.size_mkSlice_rii]; omega) (by simp only [Array.size_mkSlice_rii]; omega) (Array.emptyWithCapacity (xs.size + ys.size))
|
||||
else
|
||||
xs
|
||||
else
|
||||
ys
|
||||
where
|
||||
go (xs ys : Subarray α) (hxs : 0 < xs.size) (hys : 0 < ys.size) (acc : Array α) : Array α :=
|
||||
let x := xs[0]
|
||||
let y := ys[0]
|
||||
if le x y then
|
||||
if hi : 1 < xs.size then
|
||||
go (xs.drop 1) ys (by simp only [Subarray.size_drop]; omega) hys (acc.push x)
|
||||
else
|
||||
ys.foldl (init := acc.push x) (fun acc y => acc.push y)
|
||||
else
|
||||
if hj : 1 < ys.size then
|
||||
go xs (ys.drop 1) hxs (by simp only [Subarray.size_drop]; omega) (acc.push y)
|
||||
else
|
||||
xs.foldl (init := acc.push y) (fun acc x => acc.push x)
|
||||
termination_by xs.size + ys.size
|
||||
|
||||
def Subarray.mergeSort (xs : Subarray α) (le : α → α → Bool := by exact (· ≤ ·)) : Array α :=
|
||||
if h : 1 < xs.size then
|
||||
let splitIdx := (xs.size + 1) / 2 -- We follow the same splitting convention as `List.mergeSort`
|
||||
let left := xs[*...splitIdx]
|
||||
let right := xs[splitIdx...*]
|
||||
Array.MergeSort.Internal.merge (mergeSort left le) (mergeSort right le) le
|
||||
else
|
||||
xs.toArray
|
||||
termination_by xs.size
|
||||
decreasing_by
|
||||
· simp only [Subarray.size_mkSlice_rio]; omega
|
||||
· simp only [Subarray.size_mkSlice_rci]; omega
|
||||
|
||||
@[inline]
|
||||
def Array.mergeSort (xs : Array α) (le : α → α → Bool := by exact (· ≤ ·)) : Array α :=
|
||||
xs[*...*].mergeSort le
|
||||
240
src/Init/Data/Array/Sort/Lemmas.lean
Normal file
240
src/Init/Data/Array/Sort/Lemmas.lean
Normal file
@@ -0,0 +1,240 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Paul Reichert
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Array.Sort.Basic
|
||||
public import Init.Data.List.Sort.Basic
|
||||
public import Init.Data.Array.Perm
|
||||
import all Init.Data.Array.Sort.Basic
|
||||
import all Init.Data.List.Sort.Basic
|
||||
import Init.Data.List.Sort.Lemmas
|
||||
import Init.Data.Slice.Array.Lemmas
|
||||
import Init.Data.Slice.List.Lemmas
|
||||
import Init.Data.Array.Bootstrap
|
||||
import Init.Data.Array.Lemmas
|
||||
import Init.Data.Array.MapIdx
|
||||
import Init.ByCases
|
||||
|
||||
public section
|
||||
|
||||
private theorem Array.MergeSort.merge.go_eq_listMerge {xs ys : Subarray α} {hxs hys le acc} :
|
||||
(Array.MergeSort.Internal.merge.go le xs ys hxs hys acc).toList = acc.toList ++ List.merge xs.toList ys.toList le := by
|
||||
fun_induction Array.MergeSort.Internal.merge.go le xs ys hxs hys acc
|
||||
· rename_i xs ys _ _ _ _ _ _ _ _
|
||||
rw [List.merge.eq_def]
|
||||
split
|
||||
· have : xs.size = 0 := by simp [← Subarray.length_toList, *]
|
||||
omega
|
||||
· have : ys.size = 0 := by simp [← Subarray.length_toList, *]
|
||||
omega
|
||||
· rename_i x' xs' y' ys' _ _
|
||||
simp +zetaDelta only at *
|
||||
have h₁ : x' = xs[0] := by simp [Subarray.getElem_eq_getElem_toList, *]
|
||||
have h₂ : y' = ys[0] := by simp [Subarray.getElem_eq_getElem_toList, *]
|
||||
cases h₁
|
||||
cases h₂
|
||||
simp [Subarray.toList_drop, *]
|
||||
· rename_i xs ys _ _ _ _ _ _ _
|
||||
rw [List.merge.eq_def]
|
||||
split
|
||||
· have : xs.size = 0 := by simp [← Subarray.length_toList, *]
|
||||
omega
|
||||
· have : ys.size = 0 := by simp [← Subarray.length_toList, *]
|
||||
omega
|
||||
· rename_i x' xs' y' ys' _ _
|
||||
simp +zetaDelta only at *
|
||||
have h₁ : x' = xs[0] := by simp [Subarray.getElem_eq_getElem_toList, *]
|
||||
have h₂ : y' = ys[0] := by simp [Subarray.getElem_eq_getElem_toList, *]
|
||||
cases h₁
|
||||
cases h₂
|
||||
simp [*]
|
||||
have : xs.size = xs'.length + 1 := by simp [← Subarray.length_toList, *]
|
||||
have : xs' = [] := List.eq_nil_of_length_eq_zero (by omega)
|
||||
simp only [this]
|
||||
rw [← Subarray.foldl_toList]
|
||||
simp [*]
|
||||
· rename_i xs ys _ _ _ _ _ _ _ _
|
||||
rw [List.merge.eq_def]
|
||||
split
|
||||
· have : xs.size = 0 := by simp [← Subarray.length_toList, *]
|
||||
omega
|
||||
· have : ys.size = 0 := by simp [← Subarray.length_toList, *]
|
||||
omega
|
||||
· rename_i x' xs' y' ys' _ _
|
||||
simp +zetaDelta only at *
|
||||
have h₁ : x' = xs[0] := by simp [Subarray.getElem_eq_getElem_toList, *]
|
||||
have h₂ : y' = ys[0] := by simp [Subarray.getElem_eq_getElem_toList, *]
|
||||
cases h₁
|
||||
cases h₂
|
||||
simp [Subarray.toList_drop, *]
|
||||
· rename_i xs ys _ _ _ _ _ _ _
|
||||
rw [List.merge.eq_def]
|
||||
split
|
||||
· have : xs.size = 0 := by simp [← Subarray.length_toList, *]
|
||||
omega
|
||||
· have : ys.size = 0 := by simp [← Subarray.length_toList, *]
|
||||
omega
|
||||
· rename_i x' xs' y' ys' _ _
|
||||
simp +zetaDelta only at *
|
||||
have h₁ : x' = xs[0] := by simp [Subarray.getElem_eq_getElem_toList, *]
|
||||
have h₂ : y' = ys[0] := by simp [Subarray.getElem_eq_getElem_toList, *]
|
||||
cases h₁
|
||||
cases h₂
|
||||
simp [*]
|
||||
have : ys.size = ys'.length + 1 := by simp [← Subarray.length_toList, *]
|
||||
have : ys' = [] := List.eq_nil_of_length_eq_zero (by omega)
|
||||
simp [this]
|
||||
rw [← Subarray.foldl_toList]
|
||||
simp [*]
|
||||
|
||||
private theorem Array.MergeSort.merge_eq_listMerge {xs ys : Array α} {le} :
|
||||
(Array.MergeSort.Internal.merge xs ys le).toList = List.merge xs.toList ys.toList le := by
|
||||
rw [Array.MergeSort.Internal.merge]
|
||||
split <;> rename_i heq₁
|
||||
· split <;> rename_i heq₂
|
||||
· simp [Array.MergeSort.merge.go_eq_listMerge]
|
||||
· have : ys.toList = [] := by simp_all
|
||||
simp [this]
|
||||
· have : xs.toList = [] := by simp_all
|
||||
simp [this]
|
||||
|
||||
private theorem List.mergeSort_eq_merge_mkSlice {xs : List α} :
|
||||
xs.mergeSort le =
|
||||
if 1 < xs.length then
|
||||
merge (xs[*...((xs.length + 1) / 2)].toList.mergeSort le) (xs[((xs.length + 1) / 2)...*].toList.mergeSort le) le
|
||||
else
|
||||
xs := by
|
||||
fun_cases xs.mergeSort le
|
||||
· simp
|
||||
· simp
|
||||
· rename_i x y ys lr hl hr
|
||||
simp [lr]
|
||||
|
||||
theorem Subarray.toList_mergeSort {xs : Subarray α} {le : α → α → Bool} :
|
||||
(xs.mergeSort le).toList = xs.toList.mergeSort le := by
|
||||
fun_induction xs.mergeSort le
|
||||
· rw [List.mergeSort_eq_merge_mkSlice]
|
||||
simp +zetaDelta [Array.MergeSort.merge_eq_listMerge, *]
|
||||
· simp [List.mergeSort_eq_merge_mkSlice, *]
|
||||
|
||||
@[simp, grind =]
|
||||
theorem Subarray.mergeSort_eq_mergeSort_toArray {xs : Subarray α} {le : α → α → Bool} :
|
||||
xs.mergeSort le = xs.toArray.mergeSort le := by
|
||||
simp [← Array.toList_inj, toList_mergeSort, Array.mergeSort]
|
||||
|
||||
theorem Subarray.mergeSort_toArray {xs : Subarray α} {le : α → α → Bool} :
|
||||
xs.toArray.mergeSort le = xs.mergeSort le := by
|
||||
simp
|
||||
|
||||
theorem Array.toList_mergeSort {xs : Array α} {le : α → α → Bool} :
|
||||
(xs.mergeSort le).toList = xs.toList.mergeSort le := by
|
||||
rw [Array.mergeSort, Subarray.toList_mergeSort, Array.toList_mkSlice_rii]
|
||||
|
||||
theorem Array.mergeSort_eq_toArray_mergeSort_toList {xs : Array α} {le : α → α → Bool} :
|
||||
xs.mergeSort le = (xs.toList.mergeSort le).toArray := by
|
||||
simp [← toList_mergeSort]
|
||||
|
||||
/-!
|
||||
# Basic properties of `Array.mergeSort`.
|
||||
|
||||
* `pairwise_mergeSort`: `mergeSort` produces a sorted array.
|
||||
* `mergeSort_perm`: `mergeSort` is a permutation of the input array.
|
||||
* `mergeSort_of_pairwise`: `mergeSort` does not change a sorted array.
|
||||
* `sublist_mergeSort`: if `c` is a sorted sublist of `l`, then `c` is still a sublist of `mergeSort le l`.
|
||||
-/
|
||||
|
||||
namespace Array
|
||||
|
||||
-- Enable this instance locally so we can write `Pairwise le` instead of `Pairwise (le · ·)` everywhere.
|
||||
attribute [local instance] boolRelToRel
|
||||
|
||||
@[simp] theorem mergeSort_empty : (#[] : Array α).mergeSort r = #[] := by
|
||||
simp [mergeSort_eq_toArray_mergeSort_toList]
|
||||
|
||||
@[simp] theorem mergeSort_singleton {a : α} : #[a].mergeSort r = #[a] := by
|
||||
simp [mergeSort_eq_toArray_mergeSort_toList]
|
||||
|
||||
theorem mergeSort_perm {xs : Array α} {le} : (xs.mergeSort le).Perm xs := by
|
||||
simpa [mergeSort_eq_toArray_mergeSort_toList, Array.perm_iff_toList_perm] using List.mergeSort_perm _ _
|
||||
|
||||
@[simp] theorem size_mergeSort {xs : Array α} : (mergeSort xs le).size = xs.size := by
|
||||
simp [mergeSort_eq_toArray_mergeSort_toList]
|
||||
|
||||
@[simp] theorem mem_mergeSort {a : α} {xs : Array α} : a ∈ mergeSort xs le ↔ a ∈ xs := by
|
||||
simp [mergeSort_eq_toArray_mergeSort_toList]
|
||||
|
||||
/--
|
||||
The result of `Array.mergeSort` is sorted,
|
||||
as long as the comparison function is transitive (`le a b → le b c → le a c`)
|
||||
and total in the sense that `le a b || le b a`.
|
||||
|
||||
The comparison function need not be irreflexive, i.e. `le a b` and `le b a` is allowed even when `a ≠ b`.
|
||||
-/
|
||||
theorem pairwise_mergeSort
|
||||
(trans : ∀ (a b c : α), le a b → le b c → le a c)
|
||||
(total : ∀ (a b : α), le a b || le b a)
|
||||
{xs : Array α} :
|
||||
(mergeSort xs le).toList.Pairwise (le · ·) := by
|
||||
simpa [mergeSort_eq_toArray_mergeSort_toList] using List.pairwise_mergeSort trans total _
|
||||
|
||||
/--
|
||||
If the input array is already sorted, then `mergeSort` does not change the array.
|
||||
-/
|
||||
theorem mergeSort_of_pairwise {le : α → α → Bool} {xs : Array α} (_ : xs.toList.Pairwise (le · ·)) :
|
||||
mergeSort xs le = xs := by
|
||||
simpa [mergeSort_eq_toArray_mergeSort_toList, List.toArray_eq_iff] using List.mergeSort_of_pairwise ‹_›
|
||||
|
||||
/--
|
||||
This merge sort algorithm is stable,
|
||||
in the sense that breaking ties in the ordering function using the position in the array
|
||||
has no effect on the output.
|
||||
|
||||
That is, elements which are equal with respect to the ordering function will remain
|
||||
in the same order in the output array as they were in the input array.
|
||||
|
||||
See also:
|
||||
* `sublist_mergeSort`: if `c <+ l` and `c.Pairwise le`, then `c <+ (mergeSort le l).toList`.
|
||||
* `pair_sublist_mergeSort`: if `[a, b] <+ l` and `le a b`, then `[a, b] <+ (mergeSort le l).toList`)
|
||||
-/
|
||||
theorem mergeSort_zipIdx {xs : Array α} :
|
||||
(mergeSort (xs.zipIdx.map fun (a, i) => (a, i)) (List.zipIdxLE le)).map (·.1) = mergeSort xs le := by
|
||||
simpa [mergeSort_eq_toArray_mergeSort_toList, Array.toList_zipIdx] using List.mergeSort_zipIdx
|
||||
|
||||
/--
|
||||
Another statement of stability of merge sort.
|
||||
If `c` is a sorted sublist of `xs.toList`,
|
||||
then `c` is still a sublist of `(mergeSort le xs).toList`.
|
||||
-/
|
||||
theorem sublist_mergeSort {le : α → α → Bool}
|
||||
(trans : ∀ (a b c : α), le a b → le b c → le a c)
|
||||
(total : ∀ (a b : α), le a b || le b a)
|
||||
{ys : List α} (_ : ys.Pairwise (le · ·)) (_ : List.Sublist ys xs.toList) :
|
||||
List.Sublist ys (mergeSort xs le).toList := by
|
||||
simpa [mergeSort_eq_toArray_mergeSort_toList, Array.toList_zipIdx] using
|
||||
List.sublist_mergeSort trans total ‹_› ‹_›
|
||||
|
||||
/--
|
||||
Another statement of stability of merge sort.
|
||||
If a pair `[a, b]` is a sublist of `xs.toList` and `le a b`,
|
||||
then `[a, b]` is still a sublist of `(mergeSort le xs).toList`.
|
||||
-/
|
||||
theorem pair_sublist_mergeSort
|
||||
(trans : ∀ (a b c : α), le a b → le b c → le a c)
|
||||
(total : ∀ (a b : α), le a b || le b a)
|
||||
(hab : le a b) (h : List.Sublist [a, b] xs.toList) :
|
||||
List.Sublist [a, b] (mergeSort xs le).toList := by
|
||||
simpa [mergeSort_eq_toArray_mergeSort_toList, Array.toList_zipIdx] using
|
||||
List.pair_sublist_mergeSort trans total ‹_› ‹_›
|
||||
|
||||
theorem map_mergeSort {r : α → α → Bool} {s : β → β → Bool} {f : α → β}
|
||||
{xs : Array α} (hxs : ∀ a ∈ xs, ∀ b ∈ xs, r a b = s (f a) (f b)) :
|
||||
(xs.mergeSort r).map f = (xs.map f).mergeSort s := by
|
||||
simp only [mergeSort_eq_toArray_mergeSort_toList, List.map_toArray, toList_map, mk.injEq]
|
||||
apply List.map_mergeSort
|
||||
simpa
|
||||
|
||||
end Array
|
||||
@@ -2393,4 +2393,412 @@ theorem fastUmulOverflow (x y : BitVec w) :
|
||||
simp [← Nat.pow_add, show w + 1 - (k - 1) + k = w + 1 + 1 by omega] at this
|
||||
omega
|
||||
|
||||
/-! ### Population Count -/
|
||||
|
||||
/-- Extract the `k`-th bit from `x` and extend it to have length `len`. -/
|
||||
def extractAndExtendBit (idx len : Nat) (x : BitVec w) : BitVec len :=
|
||||
BitVec.zeroExtend len (BitVec.extractLsb' idx 1 x)
|
||||
|
||||
|
||||
/-- Recursively extract one bit at a time and extend it to width `w` -/
|
||||
def extractAndExtendAux (k len : Nat) (x : BitVec w) (acc : BitVec (k * len)) (hle : k ≤ w) :
|
||||
BitVec (w * len) :=
|
||||
match hwi : w - k with
|
||||
| 0 => acc.cast (by simp [show w = k by omega])
|
||||
| n' + 1 =>
|
||||
let acc' := extractAndExtendBit k len x ++ acc
|
||||
extractAndExtendAux (k + 1) len x (acc'.cast (by simp [Nat.add_mul]; omega)) (by omega)
|
||||
termination_by w - k
|
||||
|
||||
/-- We instantiate `extractAndExtendAux` to extend each bit to `len`, extending
|
||||
each bit in `x` to have width `w` and returning a `BitVec (w * w)`. -/
|
||||
def extractAndExtend (len : Nat) (x : BitVec w) : BitVec (w * len) :=
|
||||
extractAndExtendAux 0 len x ((0#0).cast (by simp)) (by omega)
|
||||
|
||||
/--
|
||||
Construct a layer of the parallel-prefix-sum tree by summing two-by-two all the
|
||||
`w`-long words in `oldLayer`, returning a bitvector containing `(oldLen + 1) / 2`
|
||||
flattened `w`-long words, each resulting from an addition.
|
||||
-/
|
||||
def cpopLayer (oldLayer : BitVec (len * w)) (newLayer : BitVec (iterNum * w))
|
||||
(hold : 2 * (iterNum - 1) < len) : BitVec (((len + 1)/2) * w) :=
|
||||
if hlen : len - (iterNum * 2) = 0 then
|
||||
have : ((len + 1)/2) = iterNum := by omega
|
||||
newLayer.cast (by simp [this])
|
||||
else
|
||||
let op1 := oldLayer.extractLsb' ((2 * iterNum) * w) w
|
||||
let op2 := oldLayer.extractLsb' ((2 * iterNum + 1) * w) w
|
||||
let newLayer' := (op1 + op2) ++ newLayer
|
||||
have hcast : w + iterNum * w = (iterNum + 1) * w := by simp [Nat.add_mul]; omega
|
||||
cpopLayer oldLayer (newLayer'.cast hcast) (by omega)
|
||||
termination_by len - (iterNum * 2)
|
||||
|
||||
/--
|
||||
Given a `BitVec (len * w)` of `len` flattened `w`-long words,
|
||||
construct a binary tree that sums two-by-two the `w`-long words in the previous layer,
|
||||
ultimately returning a single `w`-long words corresponding to the whole addition.
|
||||
-/
|
||||
def cpopTree (l : BitVec (len * w)) : BitVec w :=
|
||||
if h : len = 0 then 0#w
|
||||
else if h : len = 1 then
|
||||
l.cast (by simp [h])
|
||||
else
|
||||
cpopTree (cpopLayer l 0#(0 * w) (by omega))
|
||||
termination_by len
|
||||
|
||||
/--
|
||||
Given flattened bitvector `x : BitVec w` and a length `l : Nat`,
|
||||
construct a parallel prefix sum circuit adding each available `l`-long word in `x`.
|
||||
-/
|
||||
def cpopRec (x : BitVec w) : BitVec w :=
|
||||
if hw : 1 < w then
|
||||
let extendedBits := x.extractAndExtend w
|
||||
(cpopTree extendedBits).cast (by simp)
|
||||
else if hw' : 0 < w then
|
||||
x
|
||||
else
|
||||
0#w
|
||||
|
||||
/-- Recursive addition of the elements in a flattened bitvec, starting from the `rem`-th element. -/
|
||||
private def addRecAux (x : BitVec (l * w)) (rem : Nat) (acc : BitVec w) : BitVec w :=
|
||||
match rem with
|
||||
| 0 => acc
|
||||
| n + 1 => x.addRecAux n (acc + x.extractLsb' (n * w) w)
|
||||
|
||||
/-- Recursive addition of the elements in a flattened bitvec. -/
|
||||
private def addRec (x : BitVec (l * w)) : BitVec w := addRecAux x l 0#w
|
||||
|
||||
theorem getLsbD_extractAndExtendBit {x : BitVec w} :
|
||||
(extractAndExtendBit k len x).getLsbD i =
|
||||
(decide (i = 0) && decide (0 < len) && x.getLsbD k) := by
|
||||
simp only [extractAndExtendBit, truncate_eq_setWidth, getLsbD_setWidth, getLsbD_extractLsb',
|
||||
Nat.lt_one_iff]
|
||||
by_cases hi : i = 0
|
||||
<;> simp [hi]
|
||||
|
||||
@[simp]
|
||||
private theorem extractAndExtendAux_zero {k len : Nat} {x : BitVec w}
|
||||
{acc : BitVec (k * len)} (heq : w = k) :
|
||||
extractAndExtendAux k len x acc (by omega) = acc.cast (by simp [heq]) := by
|
||||
unfold extractAndExtendAux
|
||||
split
|
||||
· simp
|
||||
· omega
|
||||
|
||||
private theorem extractLsb'_extractAndExtendAux {k len : Nat} {x : BitVec w}
|
||||
(acc : BitVec (k * len)) (hle : k ≤ w) :
|
||||
(∀ i (_ : i < k), acc.extractLsb' (i * len) len = (x.extractLsb' i 1).setWidth len) →
|
||||
(extractAndExtendAux k len x acc (by omega)).extractLsb' (i * len) len =
|
||||
(x.extractLsb' i 1).setWidth len := by
|
||||
intros hacc
|
||||
induction hwi : w - k generalizing acc k
|
||||
· case zero =>
|
||||
rw [extractAndExtendAux_zero (by omega)]
|
||||
by_cases hj : i < k
|
||||
· apply hacc
|
||||
exact hj
|
||||
· ext l hl
|
||||
have := mul_le_mul_right (n := k) (m := i) len (by omega)
|
||||
simp [← getLsbD_eq_getElem, getLsbD_extractLsb', hl, getLsbD_setWidth,
|
||||
show w ≤ i + l by omega, getLsbD_of_ge acc (i * len + l) (by omega)]
|
||||
· case succ n' ihn' =>
|
||||
rw [extractAndExtendAux]
|
||||
split
|
||||
· omega
|
||||
· apply ihn'
|
||||
· intros i hi
|
||||
have hcast : len + k * len = (k + 1) * len := by
|
||||
simp [Nat.mul_comm, Nat.mul_add, Nat.add_comm]
|
||||
|
||||
by_cases hi' : i < k
|
||||
· have heq : extractLsb' (i * len) len (BitVec.cast hcast (extractAndExtendBit k len x ++ acc)) =
|
||||
extractLsb' (i * len) len ((extractAndExtendBit k len x ++ acc)) := by
|
||||
ext; simp
|
||||
rw [heq, extractLsb'_append_of_lt hi']
|
||||
apply hacc
|
||||
exact hi'
|
||||
· have heq : extractLsb' (i * len) len (BitVec.cast hcast (extractAndExtendBit k len x ++ acc)) =
|
||||
extractLsb' (i * len) len ((extractAndExtendBit k len x ++ acc)) := by
|
||||
ext; simp
|
||||
rw [heq, extractLsb'_append_of_eq (by omega)]
|
||||
simp [show i = k by omega, extractAndExtendBit]
|
||||
· omega
|
||||
|
||||
theorem extractLsb'_cpopLayer {w iterNum i oldLen : Nat} {oldLayer : BitVec (oldLen * w)}
|
||||
{newLayer : BitVec (iterNum * w)} (hold : 2 * (iterNum - 1) < oldLen) :
|
||||
(∀ i (_hi: i < iterNum),
|
||||
newLayer.extractLsb' (i * w) w =
|
||||
oldLayer.extractLsb' ((2 * i) * w) w + (oldLayer.extractLsb' ((2 * i + 1) * w) w)) →
|
||||
extractLsb' (i * w) w (oldLayer.cpopLayer newLayer hold) =
|
||||
extractLsb' (2 * i * w) w oldLayer + extractLsb' ((2 * i + 1) * w) w oldLayer := by
|
||||
intro proof_addition
|
||||
rw [cpopLayer]
|
||||
split
|
||||
· by_cases hi : i < iterNum
|
||||
· simp only [extractLsb'_cast]
|
||||
apply proof_addition
|
||||
exact hi
|
||||
· ext j hj
|
||||
have : iterNum * w ≤ i * w := by refine mul_le_mul_right w (by omega)
|
||||
have : oldLen * w ≤ (2 * i) * w := by refine mul_le_mul_right w (by omega)
|
||||
have : oldLen * w ≤ (2 * i + 1) * w := by refine mul_le_mul_right w (by omega)
|
||||
have hz : extractLsb' (2 * i * w) w oldLayer = 0#w := by
|
||||
ext j hj
|
||||
simp [show oldLen * w ≤ 2 * i * w + j by omega]
|
||||
have hz' : extractLsb' ((2 * i + 1) * w) w oldLayer = 0#w := by
|
||||
ext j hj
|
||||
simp [show oldLen * w ≤ (2 * i + 1) * w + j by omega]
|
||||
simp [show iterNum * w ≤ i * w + j by omega, hz, hz']
|
||||
· generalize hop1 : oldLayer.extractLsb' ((2 * iterNum) * w) w = op1
|
||||
generalize hop2 : oldLayer.extractLsb' ((2 * iterNum + 1) * w) w = op2
|
||||
have hcast : w + iterNum * w = (iterNum + 1) * w := by simp [Nat.add_mul]; omega
|
||||
apply extractLsb'_cpopLayer
|
||||
intros i hi
|
||||
by_cases hlt : i < iterNum
|
||||
· rw [extractLsb'_cast, extractLsb'_append_eq_of_add_le]
|
||||
· apply proof_addition
|
||||
exact hlt
|
||||
· rw [show i * w + w = i * w + 1 * w by omega, ← Nat.add_mul]
|
||||
exact mul_le_mul_right w hlt
|
||||
· rw [extractLsb'_cast, show i = iterNum by omega, extractLsb'_append_eq_left, hop1, hop2]
|
||||
termination_by oldLen - 2 * (iterNum + 1 - 1)
|
||||
|
||||
theorem getLsbD_cpopLayer {w iterNum: Nat} {oldLayer : BitVec (oldLen * w)}
|
||||
{newLayer : BitVec (iterNum * w)} (hold : 2 * (iterNum - 1) < oldLen) :
|
||||
(∀ i (_hi: i < iterNum),
|
||||
newLayer.extractLsb' (i * w) w =
|
||||
oldLayer.extractLsb' ((2 * i) * w) w + (oldLayer.extractLsb' ((2 * i + 1) * w) w)) →
|
||||
(oldLayer.cpopLayer newLayer hold).getLsbD k =
|
||||
(extractLsb' (2 * ((k - k % w) / w) * w) w oldLayer +
|
||||
extractLsb' ((2 * ((k - k % w) / w) + 1) * w) w oldLayer).getLsbD (k % w) := by
|
||||
intro proof_addition
|
||||
by_cases hw0 : w = 0
|
||||
· subst hw0
|
||||
simp
|
||||
· simp only [← extractLsb'_cpopLayer (hold := by omega) proof_addition,
|
||||
Nat.mod_lt (x := k) (y := w) (by omega), getLsbD_eq_getElem, getElem_extractLsb']
|
||||
congr
|
||||
by_cases hmod : k % w = 0
|
||||
· rw [hmod, Nat.sub_zero, Nat.add_zero, Nat.div_mul_cancel (by omega)]
|
||||
· rw [Nat.div_mul_cancel (by exact dvd_sub_mod k), Nat.sub_add_cancel (by exact mod_le k w)]
|
||||
|
||||
@[simp]
|
||||
private theorem addRecAux_zero {x : BitVec (l * w)} {acc : BitVec w} :
|
||||
x.addRecAux 0 acc = acc := rfl
|
||||
|
||||
@[simp]
|
||||
private theorem addRecAux_succ {x : BitVec (l * w)} {n : Nat} {acc : BitVec w} :
|
||||
x.addRecAux (n + 1) acc = x.addRecAux n (acc + extractLsb' (n * w) w x) := rfl
|
||||
|
||||
private theorem addRecAux_eq {x : BitVec (l * w)} {n : Nat} {acc : BitVec w} :
|
||||
x.addRecAux n acc = x.addRecAux n 0#w + acc := by
|
||||
induction n generalizing acc
|
||||
· case zero =>
|
||||
simp
|
||||
· case succ n ihn =>
|
||||
simp only [addRecAux_succ, BitVec.zero_add, ihn (acc := extractLsb' (n * w) w x),
|
||||
BitVec.add_assoc, ihn (acc := acc + extractLsb' (n * w) w x), BitVec.add_right_inj]
|
||||
rw [BitVec.add_comm (x := acc)]
|
||||
|
||||
private theorem extractLsb'_addRecAux_of_le {x : BitVec (len * w)} (h : r ≤ k):
|
||||
(extractLsb' 0 (k * w) x).addRecAux r 0#w = x.addRecAux r 0#w := by
|
||||
induction r generalizing x len k
|
||||
· case zero =>
|
||||
simp [addRecAux]
|
||||
· case succ diff ihdiff =>
|
||||
simp only [addRecAux_succ, BitVec.zero_add]
|
||||
have hext : diff * w + w ≤ k * w := by
|
||||
simp only [show diff * w + w = (diff + 1) * w by simp [Nat.add_mul]]
|
||||
exact Nat.mul_le_mul_right w h
|
||||
rw [extractLsb'_extractLsb'_of_le hext, addRecAux_eq (x := x),
|
||||
addRecAux_eq (x := extractLsb' 0 (k * w) x), ihdiff (x := x) (by omega) (k := k)]
|
||||
|
||||
private theorem extractLsb'_extractAndExtend_eq {i len : Nat} {x : BitVec w} :
|
||||
(extractAndExtend len x).extractLsb' (i * len) len = extractAndExtendBit i len x := by
|
||||
unfold extractAndExtend
|
||||
by_cases hilt : i < w
|
||||
· ext j hj
|
||||
simp [extractLsb'_extractAndExtendAux, extractAndExtendBit]
|
||||
· ext k hk
|
||||
have := Nat.mul_le_mul_right (n := w) (k := len) (m := i) (by omega)
|
||||
simp only [extractAndExtendBit, cast_ofNat, getElem_extractLsb', truncate_eq_setWidth,
|
||||
getElem_setWidth, getLsbD_extractLsb', Nat.lt_one_iff]
|
||||
rw [getLsbD_of_ge, getLsbD_of_ge]
|
||||
· simp
|
||||
· omega
|
||||
· omega
|
||||
|
||||
private theorem addRecAux_append_extractLsb' {x : BitVec (len * w)} (ha : 0 < len) :
|
||||
((x.extractLsb' ((len - 1) * w) w ++
|
||||
x.extractLsb' 0 ((len - 1) * w)).cast (m := len * w) hcast).addRecAux len 0#w =
|
||||
x.extractLsb' ((len - 1) * w) w +
|
||||
(x.extractLsb' 0 ((len - 1) * w)).addRecAux (len - 1) 0#w := by
|
||||
simp only [extractLsb'_addRecAux_of_le (k := len - 1) (r := len - 1) (by omega),
|
||||
BitVec.append_extractLsb'_of_lt (hcast := hcast)]
|
||||
have hsucc := addRecAux_succ (x := x) (acc := 0#w) (n := len - 1)
|
||||
rw [BitVec.zero_add, Nat.sub_one_add_one (by omega)] at hsucc
|
||||
rw [hsucc, addRecAux_eq, BitVec.add_comm]
|
||||
|
||||
private theorem Nat.mul_add_le_mul_of_succ_le {a b c : Nat} (h : a + 1 ≤ c) :
|
||||
a * b + b ≤ c * b := by
|
||||
rw [← Nat.succ_mul]
|
||||
exact mul_le_mul_right b h
|
||||
|
||||
/--
|
||||
The recursive addition of `w`-long words on two flattened bitvectors `x` and `y` (with different
|
||||
number of words `len` and `len'`, respectively) returns the same value, if we can prove
|
||||
that each `w`-long word in `x` results from the addition of two `w`-long words in `y`,
|
||||
using exactly all `w`-long words in `y`.
|
||||
-/
|
||||
private theorem addRecAux_eq_of {x : BitVec (len * w)} {y : BitVec (len' * w)}
|
||||
(hlen : len = (len' + 1) / 2) :
|
||||
(∀ (i : Nat) (_h : i < (len' + 1) / 2),
|
||||
extractLsb' (i * w) w x = extractLsb' (2 * i * w) w y + extractLsb' ((2 * i + 1) * w) w y) →
|
||||
x.addRecAux len 0#w = y.addRecAux len' 0#w := by
|
||||
intro hadd
|
||||
induction len generalizing len' y
|
||||
· case zero =>
|
||||
simp [show len' = 0 by omega]
|
||||
· case succ len ih =>
|
||||
have hcast : w + (len + 1 - 1) * w = (len + 1) * w := by
|
||||
simp [Nat.add_mul, Nat.add_comm]
|
||||
have hcast' : w + (len' - 1) * w = len' * w := by
|
||||
rw [Nat.sub_mul, Nat.one_mul,
|
||||
← Nat.add_sub_assoc (by refine Nat.le_mul_of_pos_left w (by omega)), Nat.add_comm]
|
||||
simp
|
||||
rw [addRecAux_succ, ← BitVec.append_extractLsb'_of_lt (x := x) (hcast := hcast)]
|
||||
have happ := addRecAux_append_extractLsb' (len := len + 1) (x := x) (hcast := hcast) (by omega)
|
||||
simp only [Nat.add_one_sub_one, addRecAux_succ, BitVec.zero_add] at happ
|
||||
simp only [Nat.add_one_sub_one, BitVec.zero_add, happ]
|
||||
have := Nat.succ_mul (n := len' - 1) (m := w)
|
||||
rw [succ_eq_add_one, Nat.sub_one_add_one (by omega)] at this
|
||||
by_cases hmod : len' % 2 = 0
|
||||
· /- `sum` results from the addition of the two last elements in `y`, `sum = op1 + op2` -/
|
||||
have := Nat.mul_le_mul_right (n := len' - 1 - 1) (m := len' - 1) (k := w) (by omega)
|
||||
have := Nat.succ_mul (n := len' - 1 - 1) (m := w)
|
||||
have hcast'' : w + (len' - 1 - 1) * w = (len' - 1) * w := by
|
||||
rw [Nat.sub_mul, Nat.one_mul,
|
||||
← Nat.add_sub_assoc (k := w) (by refine Nat.le_mul_of_pos_left w (by omega))]
|
||||
simp
|
||||
rw [succ_eq_add_one, Nat.sub_one_add_one (by omega)] at this
|
||||
rw [← BitVec.append_extractLsb'_of_lt (x := y) (hcast := hcast'),
|
||||
addRecAux_append_extractLsb' (by omega),
|
||||
← BitVec.append_extractLsb'_of_lt (x := extractLsb' 0 ((len' - 1) * w) y) (hcast := hcast''),
|
||||
addRecAux_append_extractLsb' (by omega),
|
||||
extractLsb'_extractLsb'_of_le (by exact Nat.mul_add_le_mul_of_succ_le (by omega)),
|
||||
extractLsb'_extractLsb'_of_le (by omega), ← BitVec.add_assoc, hadd (_h := by omega)]
|
||||
congr 1
|
||||
· rw [show len = (len' + 1) / 2 - 1 by omega, BitVec.add_comm]
|
||||
congr <;> omega
|
||||
· apply ih
|
||||
· omega
|
||||
· intros
|
||||
rw [extractLsb'_extractLsb'_of_le (by exact Nat.mul_add_le_mul_of_succ_le (by omega)),
|
||||
extractLsb'_extractLsb'_of_le (by exact Nat.mul_add_le_mul_of_succ_le (by omega)),
|
||||
extractLsb'_extractLsb'_of_le (by exact Nat.mul_add_le_mul_of_succ_le (by omega)),
|
||||
hadd (_h := by omega)]
|
||||
· /- `sum` results from the addition of the last elements in `y` with `0#w` -/
|
||||
have : len' * w ≤ (len' - 1 + 1) * w := by exact mul_le_mul_right w (by omega)
|
||||
rw [← BitVec.append_extractLsb'_of_lt (x := y) (hcast := hcast'),
|
||||
addRecAux_append_extractLsb' (by omega), hadd (_h := by omega),
|
||||
show 2 * len = len' - 1 by omega]
|
||||
congr 1
|
||||
· rw [BitVec.add_right_eq_self]
|
||||
ext k hk
|
||||
simp only [getElem_extractLsb', getElem_zero]
|
||||
apply getLsbD_of_ge y ((len' - 1 + 1) * w + k) (by omega)
|
||||
· apply ih
|
||||
· omega
|
||||
· intros
|
||||
rw [extractLsb'_extractLsb'_of_le (by exact Nat.mul_add_le_mul_of_succ_le (by omega)),
|
||||
extractLsb'_extractLsb'_of_le (by exact Nat.mul_add_le_mul_of_succ_le (by omega)),
|
||||
extractLsb'_extractLsb'_of_le (by exact Nat.mul_add_le_mul_of_succ_le (by omega)),
|
||||
hadd (_h := by omega)]
|
||||
|
||||
private theorem getLsbD_extractAndExtend_of_lt {x : BitVec w} (hk : k < v) :
|
||||
(x.extractAndExtend v).getLsbD (pos * v + k) = (extractAndExtendBit pos v x).getLsbD k := by
|
||||
simp [← extractLsb'_extractAndExtend_eq (w := w) (len := v) (i := pos) (x := x)]
|
||||
omega
|
||||
|
||||
/--
|
||||
Extracting a bit from a `BitVec.extractAndExtend` is the same as extracting a bit
|
||||
from a zero-extended bit at a certain position in the original bitvector.
|
||||
-/
|
||||
theorem getLsbD_extractAndExtend {x : BitVec w} (hv : 0 < v) :
|
||||
(BitVec.extractAndExtend v x).getLsbD k =
|
||||
(BitVec.extractAndExtendBit ((k - (k % v)) / v) v x).getLsbD (k % v):= by
|
||||
rw [← getLsbD_extractAndExtend_of_lt (by exact mod_lt k hv)]
|
||||
congr
|
||||
by_cases hmod : k % v = 0
|
||||
· simp only [hmod, Nat.sub_zero, Nat.add_zero]
|
||||
rw [Nat.div_mul_cancel (by omega)]
|
||||
· rw [← Nat.div_eq_sub_mod_div]
|
||||
exact Eq.symm (div_add_mod' k v)
|
||||
|
||||
private theorem addRecAux_extractAndExtend_eq_cpopNatRec {x : BitVec w} :
|
||||
(extractAndExtend w x).addRecAux n 0#w = x.cpopNatRec n 0 := by
|
||||
induction n
|
||||
· case zero =>
|
||||
simp
|
||||
· case succ n' ihn' =>
|
||||
rw [cpopNatRec_succ, Nat.zero_add, natCast_eq_ofNat, addRecAux_succ, BitVec.zero_add,
|
||||
addRecAux_eq, cpopNatRec_eq, ihn', ofNat_add, natCast_eq_ofNat, BitVec.add_right_inj,
|
||||
extractLsb'_extractAndExtend_eq]
|
||||
ext k hk
|
||||
simp only [extractAndExtendBit, ← getLsbD_eq_getElem, getLsbD_ofNat, hk, decide_true,
|
||||
Bool.true_and, truncate_eq_setWidth, getLsbD_setWidth, getLsbD_extractLsb', Nat.lt_one_iff]
|
||||
by_cases hk0 : k = 0
|
||||
· simp only [hk0, testBit_zero, decide_true, Nat.add_zero, Bool.true_and]
|
||||
cases x.getLsbD n' <;> simp
|
||||
· simp only [show ¬k = 0 by omega, decide_false, Bool.false_and]
|
||||
symm
|
||||
apply testBit_lt_two_pow ?_
|
||||
have : (x.getLsbD n').toNat ≤ 1 := by
|
||||
cases x.getLsbD n' <;> simp
|
||||
have : 1 < 2 ^ k := by exact Nat.one_lt_two_pow hk0
|
||||
omega
|
||||
|
||||
private theorem addRecAux_extractAndExtend_eq_cpop {x : BitVec w} :
|
||||
(extractAndExtend w x).addRecAux w 0#w = x.cpop := by
|
||||
simp only [cpop]
|
||||
apply addRecAux_extractAndExtend_eq_cpopNatRec
|
||||
|
||||
private theorem addRecAux_cpopTree {x : BitVec (len * w)} :
|
||||
addRecAux ((cpopTree x).cast (m := 1 * w) (by simp)) 1 0#w = addRecAux x len 0#w := by
|
||||
unfold cpopTree
|
||||
split
|
||||
· case _ h =>
|
||||
subst h
|
||||
simp [addRecAux]
|
||||
· case _ h =>
|
||||
split
|
||||
· case _ h' =>
|
||||
simp only [addRecAux_succ, Nat.zero_mul, BitVec.zero_add, addRecAux_zero, h']
|
||||
ext; simp
|
||||
· rw [addRecAux_cpopTree]
|
||||
apply BitVec.addRecAux_eq_of (x := cpopLayer x 0#(0 * w) (by omega)) (y := x)
|
||||
· rfl
|
||||
· intros j hj
|
||||
simp [extractLsb'_cpopLayer]
|
||||
termination_by len
|
||||
|
||||
private theorem addRecAux_eq_cpopTree {x : BitVec (len * w)} :
|
||||
x.addRecAux len 0#w = (x.cpopTree).cast (by simp) := by
|
||||
rw [← addRecAux_cpopTree, addRecAux_succ, Nat.zero_mul, BitVec.zero_add, addRecAux_zero]
|
||||
ext k hk
|
||||
simp [← getLsbD_eq_getElem, hk]
|
||||
|
||||
theorem cpop_eq_cpopRec {x : BitVec w} :
|
||||
BitVec.cpop x = BitVec.cpopRec x := by
|
||||
unfold BitVec.cpopRec
|
||||
split
|
||||
· simp [← addRecAux_extractAndExtend_eq_cpop, addRecAux_eq_cpopTree (x := extractAndExtend w x)]
|
||||
· split
|
||||
· ext k hk
|
||||
cases hx : x.getLsbD 0
|
||||
<;> simp [hx, cpop, ← getLsbD_eq_getElem, show k = 0 by omega, show w = 1 by omega]
|
||||
· have hw : w = 0 := by omega
|
||||
subst hw
|
||||
simp [of_length_zero]
|
||||
|
||||
end BitVec
|
||||
|
||||
@@ -2786,6 +2786,14 @@ theorem msb_append {x : BitVec w} {y : BitVec v} :
|
||||
rw [getElem_append] -- Why does this not work with `simp [getElem_append]`?
|
||||
simp
|
||||
|
||||
theorem append_of_zero_width (x : BitVec w) (y : BitVec v) (h : w = 0) :
|
||||
(x ++ y) = y.cast (by simp [h]) := by
|
||||
ext i ih
|
||||
subst h
|
||||
simp [← getLsbD_eq_getElem, getLsbD_append]
|
||||
omega
|
||||
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
@[grind =]
|
||||
theorem toInt_append {x : BitVec n} {y : BitVec m} :
|
||||
(x ++ y).toInt = if n == 0 then y.toInt else (2 ^ m) * x.toInt + y.toNat := by
|
||||
@@ -3012,6 +3020,34 @@ theorem extractLsb'_append_extractLsb'_eq_extractLsb' {x : BitVec w} (h : start
|
||||
congr 1
|
||||
omega
|
||||
|
||||
theorem append_extractLsb'_of_lt {x : BitVec (x_len * w)} :
|
||||
(x.extractLsb' ((x_len - 1) * w) w ++ x.extractLsb' 0 ((x_len - 1) * w)).cast hcast = x := by
|
||||
ext i hi
|
||||
simp only [getElem_cast, getElem_append, getElem_extractLsb', Nat.zero_add, dite_eq_ite]
|
||||
rw [← getLsbD_eq_getElem, ite_eq_left_iff, Nat.not_lt]
|
||||
intros
|
||||
simp only [show (x_len - 1) * w + (i - (x_len - 1) * w) = i by omega]
|
||||
|
||||
|
||||
theorem extractLsb'_append_of_lt {x : BitVec (k * w)} {y : BitVec w} (hlt : i < k) :
|
||||
extractLsb' (i * w) w (y ++ x) = extractLsb' (i * w) w x := by
|
||||
ext j hj
|
||||
simp only [← getLsbD_eq_getElem, getLsbD_extractLsb', hj, decide_true, getLsbD_append,
|
||||
Bool.true_and, ite_eq_left_iff, Nat.not_lt]
|
||||
intros h
|
||||
by_cases hw0 : w = 0
|
||||
· subst hw0
|
||||
simp
|
||||
· have : i * w ≤ (k - 1) * w := Nat.mul_le_mul_right w (by omega)
|
||||
have h' : i * w + j < (k - 1 + 1) * w := by simp [Nat.add_mul]; omega
|
||||
rw [Nat.sub_one_add_one (by omega)] at h'
|
||||
omega
|
||||
|
||||
theorem extractLsb'_append_of_eq {x : BitVec (k * w)} {y : BitVec w} (heq : i = k) :
|
||||
extractLsb' (i * w) w (y ++ x) = y := by
|
||||
ext j hj
|
||||
simp [← getLsbD_eq_getElem, getLsbD_append, hj, heq]
|
||||
|
||||
/-- Combine adjacent `~~~ (extractLsb _)'` operations into a single `~~~ (extractLsb _)'`. -/
|
||||
theorem not_extractLsb'_append_not_extractLsb'_eq_not_extractLsb' {x : BitVec w} (h : start₂ = start₁ + len₁) :
|
||||
(~~~ (x.extractLsb' start₂ len₂) ++ ~~~ (x.extractLsb' start₁ len₁)) =
|
||||
|
||||
@@ -629,6 +629,7 @@ export Bool (cond_eq_if cond_eq_ite xor and or not)
|
||||
This should not be turned on globally as an instance because it degrades performance in Mathlib,
|
||||
but may be used locally.
|
||||
-/
|
||||
@[implicit_reducible]
|
||||
def boolPredToPred : Coe (α → Bool) (α → Prop) where
|
||||
coe r := fun a => Eq (r a) true
|
||||
|
||||
|
||||
@@ -469,5 +469,3 @@ def prevn : Iterator → Nat → Iterator
|
||||
|
||||
end Iterator
|
||||
end ByteArray
|
||||
|
||||
instance : ToString ByteArray := ⟨fun bs => bs.toList.toString⟩
|
||||
|
||||
@@ -129,6 +129,14 @@ The ASCII digits are the following: `0123456789`.
|
||||
@[inline] def isDigit (c : Char) : Bool :=
|
||||
c.val ≥ '0'.val && c.val ≤ '9'.val
|
||||
|
||||
/--
|
||||
Returns `true` if the character is an ASCII hexadecimal digit.
|
||||
|
||||
The ASCII hexadecimal digits are the following: `0123456789abcdefABCDEF`.
|
||||
-/
|
||||
@[inline] def isHexDigit (c : Char) : Bool :=
|
||||
c.isDigit || (c.val ≥ 'a'.val && c.val ≤ 'f'.val) || (c.val ≥ 'A'.val && c.val ≤ 'F'.val)
|
||||
|
||||
/--
|
||||
Returns `true` if the character is an ASCII letter or digit.
|
||||
|
||||
|
||||
@@ -62,7 +62,7 @@ instance ltTrichotomous : Std.Trichotomous (· < · : Char → Char → Prop) wh
|
||||
trichotomous _ _ h₁ h₂ := Char.le_antisymm (by simpa using h₂) (by simpa using h₁)
|
||||
|
||||
@[deprecated ltTrichotomous (since := "2025-10-27")]
|
||||
def notLTAntisymm : Std.Antisymm (¬ · < · : Char → Char → Prop) where
|
||||
theorem notLTAntisymm : Std.Antisymm (¬ · < · : Char → Char → Prop) where
|
||||
antisymm := Char.ltTrichotomous.trichotomous
|
||||
|
||||
instance ltAsymm : Std.Asymm (· < · : Char → Char → Prop) where
|
||||
@@ -73,7 +73,7 @@ instance leTotal : Std.Total (· ≤ · : Char → Char → Prop) where
|
||||
|
||||
-- This instance is useful while setting up instances for `String`.
|
||||
@[deprecated ltAsymm (since := "2025-08-01")]
|
||||
def notLTTotal : Std.Total (¬ · < · : Char → Char → Prop) where
|
||||
theorem notLTTotal : Std.Total (¬ · < · : Char → Char → Prop) where
|
||||
total := fun x y => by simpa using Char.le_total y x
|
||||
|
||||
@[simp] theorem ofNat_toNat (c : Char) : Char.ofNat c.toNat = c := by
|
||||
|
||||
@@ -9,6 +9,7 @@ prelude
|
||||
public import Init.Data.Float
|
||||
import Init.Ext
|
||||
public import Init.GetElem
|
||||
public import Init.Data.ToString.Extra
|
||||
|
||||
public section
|
||||
universe u
|
||||
|
||||
@@ -414,7 +414,7 @@ Renders a `Format` to a string.
|
||||
-/
|
||||
def pretty (f : Format) (width : Nat := defWidth) (indent : Nat := 0) (column := 0) : String :=
|
||||
let act : StateM State Unit := prettyM f width indent
|
||||
State.out <| act (State.mk "" column) |>.snd
|
||||
State.out <| act.run (State.mk "" column) |>.snd
|
||||
|
||||
end Format
|
||||
|
||||
|
||||
@@ -118,16 +118,19 @@ theorem toNat_pow_of_nonneg {x : Int} (h : 0 ≤ x) (k : Nat) : (x ^ k).toNat =
|
||||
| succ k ih =>
|
||||
rw [Int.pow_succ, Int.toNat_mul (Int.pow_nonneg h) h, ih, Nat.pow_succ]
|
||||
|
||||
protected theorem sq_nonnneg (m : Int) : 0 ≤ m ^ 2 := by
|
||||
protected theorem sq_nonneg (m : Int) : 0 ≤ m ^ 2 := by
|
||||
rw [Int.pow_succ, Int.pow_one]
|
||||
cases m
|
||||
· apply Int.mul_nonneg <;> simp
|
||||
· apply Int.mul_nonneg_of_nonpos_of_nonpos <;> exact negSucc_le_zero _
|
||||
|
||||
@[deprecated Int.sq_nonneg (since := "2026-03-13")]
|
||||
protected theorem sq_nonnneg (m : Int) : 0 ≤ m ^ 2 := Int.sq_nonneg m
|
||||
|
||||
protected theorem pow_nonneg_of_even {m : Int} {n : Nat} (h : n % 2 = 0) : 0 ≤ m ^ n := by
|
||||
rw [← Nat.mod_add_div n 2, h, Nat.zero_add, Int.pow_mul]
|
||||
apply Int.pow_nonneg
|
||||
exact Int.sq_nonnneg m
|
||||
exact Int.sq_nonneg m
|
||||
|
||||
protected theorem neg_pow {m : Int} {n : Nat} : (-m)^n = (-1)^(n % 2) * m^n := by
|
||||
rw [Int.neg_eq_neg_one_mul, Int.mul_pow]
|
||||
|
||||
@@ -6,6 +6,7 @@ Authors: Paul Reichert
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Iterators.Combinators.Append
|
||||
public import Init.Data.Iterators.Combinators.Monadic
|
||||
public import Init.Data.Iterators.Combinators.FilterMap
|
||||
public import Init.Data.Iterators.Combinators.FlatMap
|
||||
|
||||
79
src/Init/Data/Iterators/Combinators/Append.lean
Normal file
79
src/Init/Data/Iterators/Combinators/Append.lean
Normal file
@@ -0,0 +1,79 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO, LLC. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Paul Reichert
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Iterators.Combinators.Monadic.Append
|
||||
|
||||
public section
|
||||
|
||||
namespace Std
|
||||
open Std.Iterators Std.Iterators.Types
|
||||
|
||||
/--
|
||||
Given two iterators `it₁` and `it₂`, `it₁.append it₂` is an iterator that first outputs all values
|
||||
of `it₁` in order and then all values of `it₂` in order.
|
||||
|
||||
**Marble diagram:**
|
||||
|
||||
```text
|
||||
it₁ ---a----b---c--⊥
|
||||
it₂ --d--e--⊥
|
||||
it₁.append it₂ ---a----b---c-----d--e--⊥
|
||||
```
|
||||
|
||||
**Termination properties:**
|
||||
|
||||
* `Finite` instance: only if `it₁` and `it₂` are finite
|
||||
* `Productive` instance: only if `it₁` and `it₂` are productive
|
||||
|
||||
Note: If `it₁` is not finite, then `it₁.append it₂` can be productive while `it₂` is not.
|
||||
The standard library does not provide a `Productive` instance for this case.
|
||||
|
||||
**Performance:**
|
||||
|
||||
This combinator incurs an additional O(1) cost with each output of `it₁` and `it₂`.
|
||||
-/
|
||||
@[inline, expose]
|
||||
def Iter.append {α₁ : Type w} {α₂ : Type w} {β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β]
|
||||
(it₁ : Iter (α := α₁) β) (it₂ : Iter (α := α₂) β) :
|
||||
Iter (α := Append α₁ α₂ Id β) β :=
|
||||
(it₁.toIterM.append it₂.toIterM).toIter
|
||||
|
||||
/--
|
||||
This combinator is only useful for advanced use cases.
|
||||
|
||||
Given an iterator `it₂`, returns an iterator that behaves exactly like `it₂` but is of the same
|
||||
type as `it₁.append it₂` (after `it₁` has been exhausted).
|
||||
This is useful for constructing intermediate states of the append iterator.
|
||||
|
||||
**Marble diagram:**
|
||||
|
||||
```text
|
||||
it₂ --a--b--⊥
|
||||
Iter.appendSnd α₁ it₂ --a--b--⊥
|
||||
```
|
||||
|
||||
**Termination properties:**
|
||||
|
||||
* `Finite` instance: only if `it₂` and iterators of type `α₁` are finite
|
||||
* `Productive` instance: only if `it₂` and iterators of type `α₁` are productive
|
||||
|
||||
Note: If iterators of type `α₁` are not finite, then `append α₁ it₂` can be productive while `it₂` is not.
|
||||
The standard library does not provide a `Productive` instance for this case.
|
||||
|
||||
**Performance:**
|
||||
|
||||
This combinator incurs an additional O(1) cost with each output of `it₂`.
|
||||
-/
|
||||
@[inline, expose]
|
||||
def Iter.Intermediate.appendSnd {α₂ : Type w} {β : Type w}
|
||||
[Iterator α₂ Id β] (α₁ : Type w) (it₂ : Iter (α := α₂) β) :
|
||||
Iter (α := Append α₁ α₂ Id β) β :=
|
||||
(IterM.Intermediate.appendSnd α₁ it₂.toIterM).toIter
|
||||
|
||||
end Std
|
||||
@@ -6,6 +6,7 @@ Authors: Paul Reichert
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Iterators.Combinators.Monadic.Append
|
||||
public import Init.Data.Iterators.Combinators.Monadic.FilterMap
|
||||
public import Init.Data.Iterators.Combinators.Monadic.FlatMap
|
||||
public import Init.Data.Iterators.Combinators.Monadic.Take
|
||||
|
||||
261
src/Init/Data/Iterators/Combinators/Monadic/Append.lean
Normal file
261
src/Init/Data/Iterators/Combinators/Monadic/Append.lean
Normal file
@@ -0,0 +1,261 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO, LLC. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Paul Reichert
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Iterators.Consumers.Monadic.Loop
|
||||
public import Init.Classical
|
||||
import Init.Data.Option.Lemmas
|
||||
import Init.ByCases
|
||||
import Init.Omega
|
||||
|
||||
public section
|
||||
|
||||
/-!
|
||||
This module provides the iterator combinator `IterM.append`.
|
||||
-/
|
||||
|
||||
namespace Std
|
||||
|
||||
variable {α : Type w} {m : Type w → Type w'} {β : Type w}
|
||||
|
||||
/--
|
||||
The internal state of the `IterM.append` iterator combinator.
|
||||
-/
|
||||
inductive Iterators.Types.Append (α₁ α₂ : Type w) (m : Type w → Type w') (β : Type w) where
|
||||
| fst : IterM (α := α₁) m β → IterM (α := α₂) m β → Append α₁ α₂ m β
|
||||
| snd : IterM (α := α₂) m β → Append α₁ α₂ m β
|
||||
|
||||
open Std.Iterators Std.Iterators.Types
|
||||
|
||||
/--
|
||||
Given two iterators `it₁` and `it₂`, `it₁.append it₂` is an iterator that first outputs all values
|
||||
of `it₁` in order and then all values of `it₂` in order.
|
||||
|
||||
**Marble diagram:**
|
||||
|
||||
```text
|
||||
it₁ ---a----b---c--⊥
|
||||
it₂ --d--e--⊥
|
||||
it₁.append it₂ ---a----b---c-----d--e--⊥
|
||||
```
|
||||
|
||||
**Termination properties:**
|
||||
|
||||
* `Finite` instance: only if `it₁` and `it₂` are finite
|
||||
* `Productive` instance: only if `it₁` and `it₂` are productive
|
||||
|
||||
Note: If `it₁` is not finite, then `it₁.append it₂` can be productive while `it₂` is not.
|
||||
The standard library does not provide a `Productive` instance for this case.
|
||||
|
||||
**Performance:**
|
||||
|
||||
This combinator incurs an additional O(1) cost with each output of `it₁` and `it₂`.
|
||||
-/
|
||||
@[inline, expose]
|
||||
def IterM.append [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
(it₁ : IterM (α := α₁) m β) (it₂ : IterM (α := α₂) m β) :=
|
||||
(⟨Iterators.Types.Append.fst it₁ it₂⟩ : IterM m β)
|
||||
|
||||
/--
|
||||
This combinator is only useful for advanced use cases.
|
||||
|
||||
Given an iterator `it₂`, `IterM.Intermediate.appendSnd α₁ it₂` returns an iterator that behaves
|
||||
exactly like `it₂` but has the same type as `it₁.append it₂` (after `it₁` has been exhausted).
|
||||
This is useful for constructing intermediate states of the append iterator.
|
||||
|
||||
**Marble diagram:**
|
||||
|
||||
```text
|
||||
it₂ --a--b--⊥
|
||||
IterM.Intermediate.appendSnd α₁ it₂ --a--b--⊥
|
||||
```
|
||||
|
||||
**Termination properties:**
|
||||
|
||||
* `Finite` instance: only if `it₂` and iterators of type `α₁` are finite
|
||||
* `Productive` instance: only if `it₂` and iterators of type `α₁` are productive
|
||||
|
||||
Note: If iterators of type `α₁` are not finite, then `appendSnd α₁ it₂` can be productive
|
||||
while `it₂` is not. The standard library does not provide a `Productive` instance for this case.
|
||||
|
||||
**Performance:**
|
||||
|
||||
This combinator incurs an additional O(1) cost with each output of `it₂`.
|
||||
-/
|
||||
@[inline, expose]
|
||||
def IterM.Intermediate.appendSnd [Iterator α₂ m β] (α₁ : Type w) (it₂ : IterM (α := α₂) m β) :=
|
||||
(⟨Iterators.Types.Append.snd (α₁ := α₁) it₂⟩ : IterM m β)
|
||||
|
||||
namespace Iterators.Types
|
||||
|
||||
inductive Append.PlausibleStep [Iterator α₁ m β] [Iterator α₂ m β] :
|
||||
IterM (α := Append α₁ α₂ m β) m β → IterStep (IterM (α := Append α₁ α₂ m β) m β) β → Prop where
|
||||
| fstYield {it₁ : IterM (α := α₁) m β} {it₂ : IterM (α := α₂) m β} :
|
||||
it₁.IsPlausibleStep (.yield it₁' out) → PlausibleStep (it₁.append it₂) (.yield (it₁'.append it₂) out)
|
||||
| fstSkip {it₁ : IterM (α := α₁) m β} {it₂ : IterM (α := α₂) m β} :
|
||||
it₁.IsPlausibleStep (.skip it₁') → PlausibleStep (it₁.append it₂) (.skip (it₁'.append it₂))
|
||||
| fstDone {it₁ : IterM (α := α₁) m β} {it₂ : IterM (α := α₂) m β} :
|
||||
it₁.IsPlausibleStep .done → PlausibleStep (it₁.append it₂) (.skip (IterM.Intermediate.appendSnd α₁ it₂))
|
||||
| sndYield {it₂ : IterM (α := α₂) m β} :
|
||||
it₂.IsPlausibleStep (.yield it₂' out) →
|
||||
PlausibleStep (IterM.Intermediate.appendSnd α₁ it₂) (.yield (IterM.Intermediate.appendSnd α₁ it₂') out)
|
||||
| sndSkip {it₂ : IterM (α := α₂) m β} :
|
||||
it₂.IsPlausibleStep (.skip it₂') →
|
||||
PlausibleStep (IterM.Intermediate.appendSnd α₁ it₂) (.skip (IterM.Intermediate.appendSnd α₁ it₂'))
|
||||
| sndDone {it₂ : IterM (α := α₂) m β} :
|
||||
it₂.IsPlausibleStep .done → PlausibleStep (IterM.Intermediate.appendSnd α₁ it₂) .done
|
||||
|
||||
@[inline]
|
||||
instance Append.instIterator [Monad m] [Iterator α₁ m β] [Iterator α₂ m β] :
|
||||
Iterator (Append α₁ α₂ m β) m β where
|
||||
IsPlausibleStep := Append.PlausibleStep
|
||||
step
|
||||
| ⟨.fst it₁ it₂⟩ => do
|
||||
match (← it₁.step).inflate with
|
||||
| .yield it₁' out h => return .deflate <| .yield (it₁'.append it₂) out (.fstYield h)
|
||||
| .skip it₁' h => return .deflate <| .skip (it₁'.append it₂) (.fstSkip h)
|
||||
| .done h => return .deflate <| .skip (IterM.Intermediate.appendSnd α₁ it₂) (.fstDone h)
|
||||
| ⟨.snd it₂⟩ => do
|
||||
match (← it₂.step).inflate with
|
||||
| .yield it₂' out h => return .deflate <| .yield (IterM.Intermediate.appendSnd α₁ it₂') out (.sndYield h)
|
||||
| .skip it₂' h => return .deflate <| .skip (IterM.Intermediate.appendSnd α₁ it₂') (.sndSkip h)
|
||||
| .done h => return .deflate <| .done (.sndDone h)
|
||||
|
||||
instance Append.instIteratorLoop {n : Type x → Type x'} [Monad m] [Monad n]
|
||||
[Iterator α₁ m β] [Iterator α₂ m β] :
|
||||
IteratorLoop (Append α₁ α₂ m β) m n :=
|
||||
.defaultImplementation
|
||||
|
||||
section Finite
|
||||
|
||||
variable {α₁ : Type w} {α₂ : Type w} {m : Type w → Type w'} {β : Type w}
|
||||
|
||||
variable (α₁ α₂ m β) in
|
||||
def Append.Rel [Monad m] [Iterator α₁ m β] [Iterator α₂ m β] [Finite α₁ m] [Finite α₂ m] :
|
||||
IterM (α := Append α₁ α₂ m β) m β → IterM (α := Append α₁ α₂ m β) m β → Prop :=
|
||||
InvImage
|
||||
(Prod.Lex
|
||||
(Option.lt (InvImage IterM.TerminationMeasures.Finite.Rel IterM.finitelyManySteps))
|
||||
(InvImage IterM.TerminationMeasures.Finite.Rel IterM.finitelyManySteps))
|
||||
(fun it => match it.internalState with
|
||||
| .fst it₁ it₂ => (some it₁, it₂)
|
||||
| .snd it₂ => (none, it₂))
|
||||
|
||||
theorem Append.rel_of_fst [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Finite α₁ m] [Finite α₂ m] {it₁ it₁' : IterM (α := α₁) m β} {it₂ : IterM (α := α₂) m β}
|
||||
(h : it₁'.finitelyManySteps.Rel it₁.finitelyManySteps) :
|
||||
Append.Rel α₁ α₂ m β (it₁'.append it₂) (it₁.append it₂) := by
|
||||
exact Prod.Lex.left _ _ h
|
||||
|
||||
theorem Append.rel_fstDone [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Finite α₁ m] [Finite α₂ m] {it₁ : IterM (α := α₁) m β} {it₂ : IterM (α := α₂) m β} :
|
||||
Append.Rel α₁ α₂ m β (IterM.Intermediate.appendSnd α₁ it₂) (it₁.append it₂) := by
|
||||
exact Prod.Lex.left _ _ trivial
|
||||
|
||||
theorem Append.rel_of_snd [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Finite α₁ m] [Finite α₂ m] {it₂ it₂' : IterM (α := α₂) m β}
|
||||
(h : it₂'.finitelyManySteps.Rel it₂.finitelyManySteps) :
|
||||
Append.Rel α₁ α₂ m β (IterM.Intermediate.appendSnd α₁ it₂') (IterM.Intermediate.appendSnd α₁ it₂) := by
|
||||
exact Prod.Lex.right _ h
|
||||
|
||||
def Append.instFinitenessRelation [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Finite α₁ m] [Finite α₂ m] :
|
||||
FinitenessRelation (Append α₁ α₂ m β) m where
|
||||
Rel := Append.Rel α₁ α₂ m β
|
||||
wf := by
|
||||
apply InvImage.wf
|
||||
refine ⟨fun (a, b) => Prod.lexAccessible (WellFounded.apply ?_ a) (WellFounded.apply ?_) b⟩
|
||||
· exact Option.wellFounded_lt <| InvImage.wf _ WellFoundedRelation.wf
|
||||
· exact InvImage.wf _ WellFoundedRelation.wf
|
||||
subrelation {it it'} h := by
|
||||
obtain ⟨step, h, h'⟩ := h
|
||||
cases h' <;> cases h
|
||||
case fstYield =>
|
||||
apply Append.rel_of_fst
|
||||
exact IterM.TerminationMeasures.Finite.rel_of_yield ‹_›
|
||||
case fstSkip =>
|
||||
apply Append.rel_of_fst
|
||||
exact IterM.TerminationMeasures.Finite.rel_of_skip ‹_›
|
||||
case fstDone =>
|
||||
exact Append.rel_fstDone
|
||||
case sndYield =>
|
||||
apply Append.rel_of_snd
|
||||
exact IterM.TerminationMeasures.Finite.rel_of_yield ‹_›
|
||||
case sndSkip =>
|
||||
apply Append.rel_of_snd
|
||||
exact IterM.TerminationMeasures.Finite.rel_of_skip ‹_›
|
||||
|
||||
@[no_expose]
|
||||
public instance Append.instFinite [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Finite α₁ m] [Finite α₂ m] : Finite (Append α₁ α₂ m β) m :=
|
||||
.of_finitenessRelation instFinitenessRelation
|
||||
|
||||
end Finite
|
||||
|
||||
section Productive
|
||||
|
||||
variable {α₁ : Type w} {α₂ : Type w} {m : Type w → Type w'} {β : Type w}
|
||||
|
||||
variable (α₁ α₂ m β) in
|
||||
def Append.ProductiveRel [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Productive α₁ m] [Productive α₂ m] :
|
||||
IterM (α := Append α₁ α₂ m β) m β → IterM (α := Append α₁ α₂ m β) m β → Prop :=
|
||||
InvImage
|
||||
(Prod.Lex
|
||||
(Option.lt (InvImage IterM.TerminationMeasures.Productive.Rel IterM.finitelyManySkips))
|
||||
(InvImage IterM.TerminationMeasures.Productive.Rel IterM.finitelyManySkips))
|
||||
(fun it => match it.internalState with
|
||||
| .fst it₁ it₂ => (some it₁, it₂)
|
||||
| .snd it₂ => (none, it₂))
|
||||
|
||||
theorem Append.productiveRel_of_fst [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Productive α₁ m] [Productive α₂ m] {it₁ it₁' : IterM (α := α₁) m β}
|
||||
{it₂ : IterM (α := α₂) m β}
|
||||
(h : it₁'.finitelyManySkips.Rel it₁.finitelyManySkips) :
|
||||
Append.ProductiveRel α₁ α₂ m β (it₁'.append it₂) (it₁.append it₂) := by
|
||||
exact Prod.Lex.left _ _ h
|
||||
|
||||
theorem Append.productiveRel_fstDone [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Productive α₁ m] [Productive α₂ m] {it₁ : IterM (α := α₁) m β}
|
||||
{it₂ : IterM (α := α₂) m β} :
|
||||
Append.ProductiveRel α₁ α₂ m β (IterM.Intermediate.appendSnd α₁ it₂) (it₁.append it₂) := by
|
||||
exact Prod.Lex.left _ _ trivial
|
||||
|
||||
theorem Append.productiveRel_of_snd [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Productive α₁ m] [Productive α₂ m] {it₂ it₂' : IterM (α := α₂) m β}
|
||||
(h : it₂'.finitelyManySkips.Rel it₂.finitelyManySkips) :
|
||||
Append.ProductiveRel α₁ α₂ m β
|
||||
(IterM.Intermediate.appendSnd α₁ it₂') (IterM.Intermediate.appendSnd α₁ it₂) := by
|
||||
exact Prod.Lex.right _ h
|
||||
|
||||
private def Append.instProductivenessRelation [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Productive α₁ m] [Productive α₂ m] :
|
||||
ProductivenessRelation (Append α₁ α₂ m β) m where
|
||||
Rel := Append.ProductiveRel α₁ α₂ m β
|
||||
wf := by
|
||||
apply InvImage.wf
|
||||
refine ⟨fun (a, b) => Prod.lexAccessible (WellFounded.apply ?_ a) (WellFounded.apply ?_) b⟩
|
||||
· exact Option.wellFounded_lt <| InvImage.wf _ WellFoundedRelation.wf
|
||||
· exact InvImage.wf _ WellFoundedRelation.wf
|
||||
subrelation {it it'} h := by
|
||||
cases h
|
||||
case fstSkip =>
|
||||
apply Append.productiveRel_of_fst
|
||||
exact IterM.TerminationMeasures.Productive.rel_of_skip ‹_›
|
||||
case fstDone =>
|
||||
exact Append.productiveRel_fstDone
|
||||
case sndSkip =>
|
||||
apply Append.productiveRel_of_snd
|
||||
exact IterM.TerminationMeasures.Productive.rel_of_skip ‹_›
|
||||
|
||||
instance Append.instProductive [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
[Productive α₁ m] [Productive α₂ m] : Productive (Append α₁ α₂ m β) m :=
|
||||
.of_productivenessRelation instProductivenessRelation
|
||||
|
||||
end Productive
|
||||
|
||||
end Std.Iterators.Types
|
||||
@@ -362,8 +362,7 @@ def Flatten.instProductivenessRelation [Monad m] [Iterator α m (IterM (α := α
|
||||
case innerDone =>
|
||||
apply Flatten.productiveRel_of_right₂
|
||||
|
||||
@[no_expose]
|
||||
public def Flatten.instProductive [Monad m] [Iterator α m (IterM (α := α₂) m β)] [Iterator α₂ m β]
|
||||
public theorem Flatten.instProductive [Monad m] [Iterator α m (IterM (α := α₂) m β)] [Iterator α₂ m β]
|
||||
[Finite α m] [Productive α₂ m] : Productive (Flatten α α₂ β m) m :=
|
||||
.of_productivenessRelation instProductivenessRelation
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ A `ForIn'` instance for iterators. Its generic membership relation is not easy t
|
||||
so this is not marked as `instance`. This way, more convenient instances can be built on top of it
|
||||
or future library improvements will make it more comfortable.
|
||||
-/
|
||||
@[always_inline, inline]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def Iter.instForIn' {α : Type w} {β : Type w} {n : Type x → Type x'} [Monad n]
|
||||
[Iterator α Id β] [IteratorLoop α Id n] :
|
||||
ForIn' n (Iter (α := α) β) β ⟨fun it out => it.IsPlausibleIndirectOutput out⟩ where
|
||||
@@ -53,7 +53,7 @@ instance (α : Type w) (β : Type w) (n : Type x → Type x') [Monad n]
|
||||
/--
|
||||
An implementation of `for h : ... in ... do ...` notation for partial iterators.
|
||||
-/
|
||||
@[always_inline, inline]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def Iter.Partial.instForIn' {α : Type w} {β : Type w} {n : Type x → Type x'} [Monad n]
|
||||
[Iterator α Id β] [IteratorLoop α Id n] :
|
||||
ForIn' n (Iter.Partial (α := α) β) β ⟨fun it out => it.it.IsPlausibleIndirectOutput out⟩ where
|
||||
@@ -71,7 +71,7 @@ instance (α : Type w) (β : Type w) (n : Type x → Type x') [Monad n]
|
||||
A `ForIn'` instance for iterators that is guaranteed to terminate after finitely many steps.
|
||||
It is not marked as an instance because the membership predicate is difficult to work with.
|
||||
-/
|
||||
@[always_inline, inline]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def Iter.Total.instForIn' {α : Type w} {β : Type w} {n : Type x → Type x'} [Monad n]
|
||||
[Iterator α Id β] [IteratorLoop α Id n] [Finite α Id] :
|
||||
ForIn' n (Iter.Total (α := α) β) β ⟨fun it out => it.it.IsPlausibleIndirectOutput out⟩ where
|
||||
|
||||
@@ -159,7 +159,7 @@ This is the default implementation of the `IteratorLoop` class.
|
||||
It simply iterates through the iterator using `IterM.step`. For certain iterators, more efficient
|
||||
implementations are possible and should be used instead.
|
||||
-/
|
||||
@[always_inline, inline, expose]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def IteratorLoop.defaultImplementation {α : Type w} {m : Type w → Type w'} {n : Type x → Type x'}
|
||||
[Monad n] [Iterator α m β] :
|
||||
IteratorLoop α m n where
|
||||
@@ -211,7 +211,7 @@ theorem IteratorLoop.wellFounded_of_productive {α β : Type w} {m : Type w →
|
||||
/--
|
||||
This `ForIn'`-style loop construct traverses a finite iterator using an `IteratorLoop` instance.
|
||||
-/
|
||||
@[always_inline, inline]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def IteratorLoop.finiteForIn' {m : Type w → Type w'} {n : Type x → Type x'}
|
||||
{α : Type w} {β : Type w} [Iterator α m β] [IteratorLoop α m n] [Monad n]
|
||||
(lift : ∀ γ δ, (γ → n δ) → m γ → n δ) :
|
||||
@@ -224,7 +224,7 @@ A `ForIn'` instance for iterators. Its generic membership relation is not easy t
|
||||
so this is not marked as `instance`. This way, more convenient instances can be built on top of it
|
||||
or future library improvements will make it more comfortable.
|
||||
-/
|
||||
@[always_inline, inline]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def IterM.instForIn' {m : Type w → Type w'} {n : Type w → Type w''}
|
||||
{α : Type w} {β : Type w} [Iterator α m β] [IteratorLoop α m n] [Monad n]
|
||||
[MonadLiftT m n] :
|
||||
@@ -239,7 +239,7 @@ instance IterM.instForInOfIteratorLoop {m : Type w → Type w'} {n : Type w →
|
||||
instForInOfForIn'
|
||||
|
||||
/-- Internal implementation detail of the iterator library. -/
|
||||
@[always_inline, inline]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def IterM.Partial.instForIn' {m : Type w → Type w'} {n : Type w → Type w''}
|
||||
{α : Type w} {β : Type w} [Iterator α m β] [IteratorLoop α m n] [MonadLiftT m n] [Monad n] :
|
||||
ForIn' n (IterM.Partial (α := α) m β) β ⟨fun it out => it.it.IsPlausibleIndirectOutput out⟩ where
|
||||
@@ -247,7 +247,7 @@ def IterM.Partial.instForIn' {m : Type w → Type w'} {n : Type w → Type w''}
|
||||
haveI := @IterM.instForIn'; forIn' it.it init f
|
||||
|
||||
/-- Internal implementation detail of the iterator library. -/
|
||||
@[always_inline, inline]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def IterM.Total.instForIn' {m : Type w → Type w'} {n : Type w → Type w''}
|
||||
{α : Type w} {β : Type w} [Iterator α m β] [IteratorLoop α m n] [MonadLiftT m n] [Monad n]
|
||||
[Finite α m] :
|
||||
|
||||
@@ -70,7 +70,7 @@ theorem LawfulMonadLiftFunction.lift_seqRight [LawfulMonad m] [LawfulMonad n]
|
||||
abbrev idToMonad [Monad m] ⦃α : Type u⦄ (x : Id α) : m α :=
|
||||
pure x.run
|
||||
|
||||
def LawfulMonadLiftFunction.idToMonad [Monad m] [LawfulMonad m] :
|
||||
theorem LawfulMonadLiftFunction.idToMonad [LawfulMonad m] :
|
||||
LawfulMonadLiftFunction (m := Id) (n := m) idToMonad where
|
||||
lift_pure := by simp [Internal.idToMonad]
|
||||
lift_bind := by simp [Internal.idToMonad]
|
||||
@@ -95,7 +95,7 @@ instance [LawfulMonadLiftBindFunction (n := n) (fun _ _ f x => lift x >>= f)] [L
|
||||
simpa using LawfulMonadLiftBindFunction.liftBind_bind (n := n)
|
||||
(liftBind := fun _ _ f x => lift x >>= f) (β := β) (γ := γ) (δ := γ) pure x g
|
||||
|
||||
def LawfulMonadLiftBindFunction.id [Monad m] [LawfulMonad m] :
|
||||
theorem LawfulMonadLiftBindFunction.id [LawfulMonad m] :
|
||||
LawfulMonadLiftBindFunction (m := Id) (n := m) (fun _ _ f x => f x.run) where
|
||||
liftBind_pure := by simp
|
||||
liftBind_bind := by simp
|
||||
|
||||
@@ -6,6 +6,7 @@ Authors: Paul Reichert
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Iterators.Lemmas.Combinators.Append
|
||||
public import Init.Data.Iterators.Lemmas.Combinators.Attach
|
||||
public import Init.Data.Iterators.Lemmas.Combinators.Monadic
|
||||
public import Init.Data.Iterators.Lemmas.Combinators.FilterMap
|
||||
|
||||
193
src/Init/Data/Iterators/Lemmas/Combinators/Append.lean
Normal file
193
src/Init/Data/Iterators/Lemmas/Combinators/Append.lean
Normal file
@@ -0,0 +1,193 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO, LLC. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Paul Reichert
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Iterators.Combinators.Append
|
||||
public import Init.Data.Iterators.Lemmas.Combinators.Monadic.Append
|
||||
public import Init.Data.Iterators.Consumers.Collect
|
||||
public import Init.Data.Iterators.Consumers.Access
|
||||
import Init.Data.Iterators.Lemmas.Consumers.Collect
|
||||
import Init.Data.Iterators.Lemmas.Consumers.Access
|
||||
import Init.Data.Iterators.Lemmas.Basic
|
||||
import Init.Omega
|
||||
|
||||
public section
|
||||
|
||||
namespace Std
|
||||
open Std.Iterators Std.Iterators.Types
|
||||
|
||||
theorem Iter.append_eq_toIter_append_toIterM {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β]
|
||||
{it₁ : Iter (α := α₁) β} {it₂ : Iter (α := α₂) β} :
|
||||
it₁.append it₂ = (it₁.toIterM.append it₂.toIterM).toIter :=
|
||||
rfl
|
||||
|
||||
theorem Iter.Intermediate.appendSnd_eq_toIter_appendSnd_toIterM {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β]
|
||||
{it₂ : Iter (α := α₂) β} :
|
||||
Iter.Intermediate.appendSnd α₁ it₂ = (IterM.Intermediate.appendSnd α₁ it₂.toIterM).toIter :=
|
||||
rfl
|
||||
|
||||
theorem Iter.step_append {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β]
|
||||
{it₁ : Iter (α := α₁) β} {it₂ : Iter (α := α₂) β} :
|
||||
(it₁.append it₂).step =
|
||||
match it₁.step with
|
||||
| .yield it₁' out h => .yield (it₁'.append it₂) out (.fstYield h)
|
||||
| .skip it₁' h => .skip (it₁'.append it₂) (.fstSkip h)
|
||||
| .done h => .skip (Iter.Intermediate.appendSnd α₁ it₂) (.fstDone h) := by
|
||||
simp only [Iter.step, append_eq_toIter_append_toIterM, toIterM_toIter, IterM.step_append,
|
||||
Id.run_bind]
|
||||
cases it₁.toIterM.step.run.inflate using PlausibleIterStep.casesOn <;>
|
||||
simp [Intermediate.appendSnd_eq_toIter_appendSnd_toIterM]
|
||||
|
||||
theorem Iter.Intermediate.step_appendSnd {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β]
|
||||
{it₂ : Iter (α := α₂) β} :
|
||||
(Iter.Intermediate.appendSnd α₁ it₂).step =
|
||||
match it₂.step with
|
||||
| .yield it₂' out h => .yield (Iter.Intermediate.appendSnd α₁ it₂') out (.sndYield h)
|
||||
| .skip it₂' h => .skip (Iter.Intermediate.appendSnd α₁ it₂') (.sndSkip h)
|
||||
| .done h => .done (.sndDone h) := by
|
||||
simp only [Iter.step, appendSnd, toIterM_toIter, IterM.Intermediate.step_appendSnd, Id.run_bind]
|
||||
cases it₂.toIterM.step.run.inflate using PlausibleIterStep.casesOn <;> simp
|
||||
|
||||
@[simp]
|
||||
theorem Iter.toList_append {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β] [Finite α₁ Id] [Finite α₂ Id]
|
||||
{it₁ : Iter (α := α₁) β} {it₂ : Iter (α := α₂) β} :
|
||||
(it₁.append it₂).toList = it₁.toList ++ it₂.toList := by
|
||||
simp [append_eq_toIter_append_toIterM, toList_eq_toList_toIterM]
|
||||
|
||||
@[simp]
|
||||
theorem Iter.toListRev_append {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β] [Finite α₁ Id] [Finite α₂ Id]
|
||||
{it₁ : Iter (α := α₁) β} {it₂ : Iter (α := α₂) β} :
|
||||
(it₁.append it₂).toListRev = it₂.toListRev ++ it₁.toListRev := by
|
||||
simp [append_eq_toIter_append_toIterM, toListRev_eq_toListRev_toIterM]
|
||||
|
||||
@[simp]
|
||||
theorem Iter.toArray_append {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β] [Finite α₁ Id] [Finite α₂ Id]
|
||||
{it₁ : Iter (α := α₁) β} {it₂ : Iter (α := α₂) β} :
|
||||
(it₁.append it₂).toArray = it₁.toArray ++ it₂.toArray := by
|
||||
simp [append_eq_toIter_append_toIterM, toArray_eq_toArray_toIterM]
|
||||
|
||||
@[simp]
|
||||
theorem Iter.atIdxSlow?_appendSnd {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β] [Productive α₁ Id] [Productive α₂ Id]
|
||||
{it₂ : Iter (α := α₂) β} {n : Nat} :
|
||||
(Iter.Intermediate.appendSnd α₁ it₂).atIdxSlow? n = it₂.atIdxSlow? n := by
|
||||
induction n, it₂ using Iter.atIdxSlow?.induct_unfolding with
|
||||
| yield_zero it it' out h h' =>
|
||||
simp only [atIdxSlow?_eq_match (it := Iter.Intermediate.appendSnd α₁ it),
|
||||
Intermediate.step_appendSnd, h']
|
||||
| yield_succ it it' out h h' n ih =>
|
||||
simp only [atIdxSlow?_eq_match (it := Iter.Intermediate.appendSnd α₁ it),
|
||||
Intermediate.step_appendSnd, h', ih]
|
||||
| skip_case n it it' h h' ih =>
|
||||
simp only [atIdxSlow?_eq_match (it := Iter.Intermediate.appendSnd α₁ it),
|
||||
Intermediate.step_appendSnd, h', ih]
|
||||
| done_case n it h h' =>
|
||||
simp only [atIdxSlow?_eq_match (it := Iter.Intermediate.appendSnd α₁ it),
|
||||
Intermediate.step_appendSnd, h']
|
||||
|
||||
theorem Iter.atIdxSlow?_append_of_eq_some {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β] [Productive α₁ Id] [Productive α₂ Id]
|
||||
{it₁ : Iter (α := α₁) β} {it₂ : Iter (α := α₂) β} {n : Nat} {b : β}
|
||||
(h : it₁.atIdxSlow? n = some b) :
|
||||
(it₁.append it₂).atIdxSlow? n = some b := by
|
||||
induction n, it₁ using Iter.atIdxSlow?.induct_unfolding generalizing it₂ with
|
||||
| yield_zero it it' out hp h' =>
|
||||
rw [atIdxSlow?_eq_match (it := it.append it₂)]
|
||||
cases h
|
||||
simp [step_append, h']
|
||||
| yield_succ it it' out hp h' n ih =>
|
||||
rw [atIdxSlow?_eq_match (it := it.append it₂)]
|
||||
simp [step_append, h', ih h]
|
||||
| skip_case n it it' hp h' ih =>
|
||||
rw [atIdxSlow?_eq_match (it := it.append it₂)]
|
||||
simp [step_append, h', ih h]
|
||||
| done_case n it hp h' =>
|
||||
cases h
|
||||
|
||||
theorem Iter.atIdxSlow?_append {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β] [Finite α₁ Id] [Productive α₂ Id]
|
||||
{it₁ : Iter (α := α₁) β} {it₂ : Iter (α := α₂) β} {n : Nat} :
|
||||
(it₁.append it₂).atIdxSlow? n =
|
||||
if n < it₁.toList.length then it₁.atIdxSlow? n
|
||||
else it₂.atIdxSlow? (n - it₁.toList.length) := by
|
||||
induction n, it₁ using Iter.atIdxSlow?.induct_unfolding generalizing it₂ with
|
||||
| yield_zero it it' out h h' =>
|
||||
simp only [atIdxSlow?_eq_match (it := it.append it₂), step_append, h']
|
||||
rw [toList_eq_match_step (it := it)]
|
||||
simp [h']
|
||||
| yield_succ it it' out h h' n ih =>
|
||||
simp only [atIdxSlow?_eq_match (it := it.append it₂), step_append, h', ih]
|
||||
rw [toList_eq_match_step (it := it)]
|
||||
simp [h', Nat.succ_lt_succ_iff, Nat.succ_sub_succ]
|
||||
| skip_case n it it' h h' ih =>
|
||||
simp only [atIdxSlow?_eq_match (it := it.append it₂), step_append, h', ih]
|
||||
rw [toList_eq_match_step (it := it)]
|
||||
simp [h']
|
||||
| done_case n it h h' =>
|
||||
simp [atIdxSlow?_eq_match (it := it.append it₂), step_append, h',
|
||||
atIdxSlow?_appendSnd, toList_eq_match_step]
|
||||
|
||||
theorem Iter.atIdxSlow?_append_of_productive {α₁ α₂ β : Type w}
|
||||
[Iterator α₁ Id β] [Iterator α₂ Id β] [Productive α₁ Id] [Productive α₂ Id]
|
||||
{it₁ : Iter (α := α₁) β} {it₂ : Iter (α := α₂) β} {n k : Nat}
|
||||
(hk : it₁.atIdxSlow? k = none)
|
||||
(hmin : ∀ j, j < k → (it₁.atIdxSlow? j).isSome)
|
||||
(hle : k ≤ n) :
|
||||
(it₁.append it₂).atIdxSlow? n = it₂.atIdxSlow? (n - k) := by
|
||||
induction n, it₁ using Iter.atIdxSlow?.induct_unfolding generalizing k it₂ with
|
||||
| yield_zero it it' out hp h' =>
|
||||
exfalso
|
||||
have : k = 0 := by omega
|
||||
subst this
|
||||
rw [atIdxSlow?_eq_match (it := it)] at hk
|
||||
simp [h'] at hk
|
||||
| yield_succ it it' out hp h' n ih =>
|
||||
rw [atIdxSlow?_eq_match (it := it.append it₂)]
|
||||
simp only [step_append, h']
|
||||
match k with
|
||||
| 0 =>
|
||||
rw [atIdxSlow?_eq_match (it := it)] at hk
|
||||
simp [h'] at hk
|
||||
| k + 1 =>
|
||||
rw [atIdxSlow?_eq_match (it := it)] at hk
|
||||
simp [h'] at hk
|
||||
have hmin' : ∀ j, j < k → (it'.atIdxSlow? j).isSome := by
|
||||
intro j hj
|
||||
have h := hmin (j + 1) (by omega)
|
||||
rw [atIdxSlow?_eq_match (it := it)] at h
|
||||
simpa [h'] using h
|
||||
rw [ih hk hmin' (by omega)]
|
||||
congr 1
|
||||
omega
|
||||
| skip_case n it it' hp h' ih =>
|
||||
rw [atIdxSlow?_eq_match (it := it.append it₂)]
|
||||
simp only [step_append, h']
|
||||
rw [atIdxSlow?_eq_match (it := it)] at hk; simp [h'] at hk
|
||||
have hmin' : ∀ j, j < k → (it'.atIdxSlow? j).isSome := by
|
||||
intro j hj
|
||||
have h := hmin j hj
|
||||
rw [atIdxSlow?_eq_match (it := it)] at h
|
||||
simpa [h'] using h
|
||||
exact ih hk hmin' hle
|
||||
| done_case n it hp h' =>
|
||||
rw [atIdxSlow?_eq_match (it := it.append it₂)]
|
||||
simp only [step_append, h', atIdxSlow?_appendSnd]
|
||||
have hk0 : k = 0 := by
|
||||
false_or_by_contra
|
||||
have h := hmin 0 (by omega)
|
||||
rw [atIdxSlow?_eq_match (it := it)] at h
|
||||
simp [h'] at h
|
||||
simp [hk0]
|
||||
|
||||
end Std
|
||||
@@ -435,8 +435,9 @@ theorem Iter.forIn_filterMapWithPostcondition
|
||||
match ← (f out).run with
|
||||
| some c => g c acc
|
||||
| none => return .yield acc) := by
|
||||
simp +instances [Iter.forIn_eq_forIn_toIterM, filterMapWithPostcondition, IterM.forIn_filterMapWithPostcondition,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]; rfl
|
||||
simp only [filterMapWithPostcondition, IterM.forIn_filterMapWithPostcondition, forIn_eq_forIn_toIterM]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
rfl -- expressions are equal up to different matchers
|
||||
|
||||
theorem Iter.forIn_filterMapM
|
||||
[Monad n] [LawfulMonad n] [Monad o] [LawfulMonad o]
|
||||
@@ -448,8 +449,9 @@ theorem Iter.forIn_filterMapM
|
||||
match ← f out with
|
||||
| some c => g c acc
|
||||
| none => return .yield acc) := by
|
||||
simp +instances [filterMapM, forIn_eq_forIn_toIterM, IterM.forIn_filterMapM,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]; rfl
|
||||
simp [filterMapM, forIn_eq_forIn_toIterM, IterM.forIn_filterMapM]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
rfl
|
||||
|
||||
theorem Iter.forIn_filterMap
|
||||
[Monad n] [LawfulMonad n] [Finite α Id]
|
||||
@@ -469,8 +471,8 @@ theorem Iter.forIn_mapWithPostcondition
|
||||
{g : β₂ → γ → o (ForInStep γ)} :
|
||||
forIn (it.mapWithPostcondition f) init g =
|
||||
forIn it init (fun out acc => do g (← (f out).run) acc) := by
|
||||
simp +instances [mapWithPostcondition, forIn_eq_forIn_toIterM, IterM.forIn_mapWithPostcondition,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
simp only [mapWithPostcondition, forIn_eq_forIn_toIterM, IterM.forIn_mapWithPostcondition]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
|
||||
theorem Iter.forIn_mapM
|
||||
[Monad n] [LawfulMonad n] [Monad o] [LawfulMonad o]
|
||||
@@ -498,8 +500,8 @@ theorem Iter.forIn_filterWithPostcondition
|
||||
haveI : MonadLift n o := ⟨monadLift⟩
|
||||
forIn (it.filterWithPostcondition f) init g =
|
||||
forIn it init (fun out acc => do if (← (f out).run).down then g out acc else return .yield acc) := by
|
||||
simp +instances [filterWithPostcondition, forIn_eq_forIn_toIterM, IterM.forIn_filterWithPostcondition,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
simp only [filterWithPostcondition, forIn_eq_forIn_toIterM, IterM.forIn_filterWithPostcondition]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
|
||||
theorem Iter.forIn_filterM
|
||||
[Monad n] [LawfulMonad n] [Monad o] [LawfulMonad o]
|
||||
@@ -508,8 +510,8 @@ theorem Iter.forIn_filterM
|
||||
[IteratorLoop α Id o] [LawfulIteratorLoop α Id o]
|
||||
{it : Iter (α := α) β} {f : β → n (ULift Bool)} {init : γ} {g : β → γ → o (ForInStep γ)} :
|
||||
forIn (it.filterM f) init g = forIn it init (fun out acc => do if (← f out).down then g out acc else return .yield acc) := by
|
||||
simp +instances [filterM, forIn_eq_forIn_toIterM, IterM.forIn_filterM,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
simp only [filterM, forIn_eq_forIn_toIterM, IterM.forIn_filterM]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
|
||||
theorem Iter.forIn_filter
|
||||
[Monad n] [LawfulMonad n]
|
||||
@@ -550,8 +552,9 @@ theorem Iter.foldM_filterMapM {α β γ δ : Type w}
|
||||
it.foldM (init := init) (fun d b => do
|
||||
let some c ← f b | pure d
|
||||
g d c) := by
|
||||
simp +instances [filterMapM, IterM.foldM_filterMapM, foldM_eq_foldM_toIterM,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]; rfl
|
||||
simp only [filterMapM, IterM.foldM_filterMapM, foldM_eq_foldM_toIterM]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
rfl
|
||||
|
||||
theorem Iter.foldM_mapWithPostcondition {α β γ δ : Type w}
|
||||
{n : Type w → Type w''} {o : Type w → Type w'''}
|
||||
@@ -563,8 +566,8 @@ theorem Iter.foldM_mapWithPostcondition {α β γ δ : Type w}
|
||||
{f : β → PostconditionT n γ} {g : δ → γ → o δ} {init : δ} {it : Iter (α := α) β} :
|
||||
(it.mapWithPostcondition f).foldM (init := init) g =
|
||||
it.foldM (init := init) (fun d b => do let c ← (f b).run; g d c) := by
|
||||
simp +instances [mapWithPostcondition, IterM.foldM_mapWithPostcondition, foldM_eq_foldM_toIterM,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
simp only [mapWithPostcondition, IterM.foldM_mapWithPostcondition, foldM_eq_foldM_toIterM]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
|
||||
theorem Iter.foldM_mapM {α β γ δ : Type w}
|
||||
{n : Type w → Type w''} {o : Type w → Type w'''}
|
||||
@@ -578,8 +581,8 @@ theorem Iter.foldM_mapM {α β γ δ : Type w}
|
||||
haveI : MonadLift n o := ⟨MonadLiftT.monadLift⟩
|
||||
(it.mapM f).foldM (init := init) g =
|
||||
it.foldM (init := init) (fun d b => do let c ← f b; g d c) := by
|
||||
simp +instances [mapM, IterM.foldM_mapM, foldM_eq_foldM_toIterM,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
simp only [mapM, IterM.foldM_mapM, foldM_eq_foldM_toIterM]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
|
||||
theorem Iter.foldM_filterWithPostcondition {α β δ : Type w}
|
||||
{n : Type w → Type w''} {o : Type w → Type w'''}
|
||||
@@ -591,8 +594,8 @@ theorem Iter.foldM_filterWithPostcondition {α β δ : Type w}
|
||||
{f : β → PostconditionT n (ULift Bool)} {g : δ → β → o δ} {init : δ} {it : Iter (α := α) β} :
|
||||
(it.filterWithPostcondition f).foldM (init := init) g =
|
||||
it.foldM (init := init) (fun d b => do if (← (f b).run).down then g d b else pure d) := by
|
||||
simp +instances [filterWithPostcondition, IterM.foldM_filterWithPostcondition, foldM_eq_foldM_toIterM,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
simp only [filterWithPostcondition, IterM.foldM_filterWithPostcondition, foldM_eq_foldM_toIterM]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
|
||||
theorem Iter.foldM_filterM {α β δ : Type w}
|
||||
{n : Type w → Type w''} {o : Type w → Type w'''}
|
||||
@@ -605,8 +608,8 @@ theorem Iter.foldM_filterM {α β δ : Type w}
|
||||
{f : β → n (ULift Bool)} {g : δ → β → o δ} {init : δ} {it : Iter (α := α) β} :
|
||||
(it.filterM f).foldM (init := init) g =
|
||||
it.foldM (init := init) (fun d b => do if (← f b).down then g d b else pure d) := by
|
||||
simp +instances [filterM, IterM.foldM_filterM, foldM_eq_foldM_toIterM,
|
||||
instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
simp only [filterM, IterM.foldM_filterM, foldM_eq_foldM_toIterM]
|
||||
rw [instMonadLiftTOfMonadLift_instMonadLiftTOfPure]
|
||||
|
||||
theorem Iter.foldM_filterMap {α β γ δ : Type w} {n : Type w → Type w''}
|
||||
[Iterator α Id β] [Finite α Id] [Monad n] [LawfulMonad n]
|
||||
|
||||
@@ -121,22 +121,22 @@ public theorem Iter.step_flatMapAfterM {α : Type w} {β : Type w} {α₂ : Type
|
||||
[Monad m] [MonadAttach m] [LawfulMonad m] [WeaklyLawfulMonadAttach m] [Iterator α Id β] [Iterator α₂ m γ]
|
||||
{f : β → m (IterM (α := α₂) m γ)} {it₁ : Iter (α := α) β} {it₂ : Option (IterM (α := α₂) m γ)} :
|
||||
(it₁.flatMapAfterM f it₂).step = (do
|
||||
match it₂ with
|
||||
match hit : it₂ with
|
||||
| none =>
|
||||
match it₁.step with
|
||||
| .yield it₁' b h =>
|
||||
let fx ← MonadAttach.attach (f b)
|
||||
return .deflate (.skip (it₁'.flatMapAfterM f (some fx.val)) (.outerYield_flatMapM_pure h fx.property))
|
||||
| .skip it₁' h => return .deflate (.skip (it₁'.flatMapAfterM f none) (.outerSkip_flatMapM_pure h))
|
||||
| .done h => return .deflate (.done (.outerDone_flatMapM_pure h))
|
||||
return .deflate (.skip (it₁'.flatMapAfterM f (some fx.val)) (hit ▸ .outerYield_flatMapM_pure h fx.property))
|
||||
| .skip it₁' h => return .deflate (.skip (it₁'.flatMapAfterM f it₂) (hit ▸ .outerSkip_flatMapM_pure h))
|
||||
| .done h => return .deflate (.done (hit ▸ .outerDone_flatMapM_pure h))
|
||||
| some it₂ =>
|
||||
match (← it₂.step).inflate with
|
||||
| .yield it₂' out h =>
|
||||
return .deflate (.yield (it₁.flatMapAfterM f (some it₂')) out (.innerYield_flatMapM_pure h))
|
||||
return .deflate (.yield (it₁.flatMapAfterM f (some it₂')) out (hit ▸ .innerYield_flatMapM_pure h))
|
||||
| .skip it₂' h =>
|
||||
return .deflate (.skip (it₁.flatMapAfterM f (some it₂')) (.innerSkip_flatMapM_pure h))
|
||||
return .deflate (.skip (it₁.flatMapAfterM f (some it₂')) (hit ▸ .innerSkip_flatMapM_pure h))
|
||||
| .done h =>
|
||||
return .deflate (.skip (it₁.flatMapAfterM f none) (.innerDone_flatMapM_pure h))) := by
|
||||
return .deflate (.skip (it₁.flatMapAfterM f none) (hit ▸ .innerDone_flatMapM_pure h))) := by
|
||||
simp only [flatMapAfterM, IterM.step_flatMapAfterM, Iter.step_mapWithPostcondition,
|
||||
PostconditionT.operation_pure]
|
||||
split
|
||||
@@ -232,7 +232,6 @@ public theorem Iter.toArray_flatMapM {α α₂ β γ : Type w} {m : Type w → T
|
||||
(it₁.flatMapM f).toArray = Array.flatten <$> (it₁.mapM fun b => do (← f b).toArray).toArray := by
|
||||
simp [flatMapM, toArray_flatMapAfterM]
|
||||
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
public theorem Iter.toList_flatMapAfter {α α₂ β γ : Type w} [Iterator α Id β] [Iterator α₂ Id γ]
|
||||
[Finite α Id] [Finite α₂ Id]
|
||||
{f : β → Iter (α := α₂) γ} {it₁ : Iter (α := α) β} {it₂ : Option (Iter (α := α₂) γ)} :
|
||||
@@ -241,9 +240,9 @@ public theorem Iter.toList_flatMapAfter {α α₂ β γ : Type w} [Iterator α I
|
||||
| some it₂ => it₂.toList ++
|
||||
(it₁.map fun b => (f b).toList).toList.flatten := by
|
||||
simp only [flatMapAfter, Iter.toList, toIterM_toIter, IterM.toList_flatMapAfter]
|
||||
cases it₂ <;> simp [map, IterM.toList_map_eq_toList_mapM, - IterM.toList_map]
|
||||
unfold Iter.toList
|
||||
cases it₂ <;> simp [map]
|
||||
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
public theorem Iter.toArray_flatMapAfter {α α₂ β γ : Type w} [Iterator α Id β] [Iterator α₂ Id γ]
|
||||
[Finite α Id] [Finite α₂ Id]
|
||||
{f : β → Iter (α := α₂) γ} {it₁ : Iter (α := α) β} {it₂ : Option (Iter (α := α₂) γ)} :
|
||||
@@ -252,6 +251,7 @@ public theorem Iter.toArray_flatMapAfter {α α₂ β γ : Type w} [Iterator α
|
||||
| some it₂ => it₂.toArray ++
|
||||
(it₁.map fun b => (f b).toArray).toArray.flatten := by
|
||||
simp only [flatMapAfter, Iter.toArray, toIterM_toIter, IterM.toArray_flatMapAfter]
|
||||
unfold Iter.toArray
|
||||
cases it₂ <;> simp [map, IterM.toArray_map_eq_toArray_mapM, - IterM.toArray_map]
|
||||
|
||||
public theorem Iter.toList_flatMap {α α₂ β γ : Type w} [Iterator α Id β] [Iterator α₂ Id γ]
|
||||
|
||||
@@ -6,6 +6,7 @@ Authors: Paul Reichert
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Iterators.Lemmas.Combinators.Monadic.Append
|
||||
public import Init.Data.Iterators.Lemmas.Combinators.Monadic.Attach
|
||||
public import Init.Data.Iterators.Lemmas.Combinators.Monadic.FilterMap
|
||||
public import Init.Data.Iterators.Lemmas.Combinators.Monadic.FlatMap
|
||||
|
||||
107
src/Init/Data/Iterators/Lemmas/Combinators/Monadic/Append.lean
Normal file
107
src/Init/Data/Iterators/Lemmas/Combinators/Monadic/Append.lean
Normal file
@@ -0,0 +1,107 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO, LLC. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Paul Reichert
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.Iterators.Combinators.Monadic.Append
|
||||
public import Init.Data.Iterators.Consumers.Monadic.Collect
|
||||
import Init.Data.Iterators.Lemmas.Consumers.Monadic.Collect
|
||||
import Init.Data.Iterators.Lemmas.Monadic.Basic
|
||||
import Init.Data.List.Lemmas
|
||||
import Init.Data.List.ToArray
|
||||
|
||||
public section
|
||||
|
||||
namespace Std
|
||||
open Std.Iterators Std.Iterators.Types
|
||||
|
||||
variable {α₁ α₂ β : Type w} {m : Type w → Type w'}
|
||||
|
||||
theorem IterM.step_append [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
{it₁ : IterM (α := α₁) m β} {it₂ : IterM (α := α₂) m β} :
|
||||
(it₁.append it₂).step = (do
|
||||
match (← it₁.step).inflate with
|
||||
| .yield it₁' out h =>
|
||||
pure <| .deflate <| .yield (it₁'.append it₂) out (.fstYield h)
|
||||
| .skip it₁' h =>
|
||||
pure <| .deflate <| .skip (it₁'.append it₂) (.fstSkip h)
|
||||
| .done h =>
|
||||
pure <| .deflate <| .skip (IterM.Intermediate.appendSnd α₁ it₂) (.fstDone h)) := by
|
||||
simp only [append, Intermediate.appendSnd, step, Iterator.step]
|
||||
apply bind_congr; intro step
|
||||
cases step.inflate using PlausibleIterStep.casesOn <;> rfl
|
||||
|
||||
theorem IterM.Intermediate.step_appendSnd [Monad m] [Iterator α₁ m β] [Iterator α₂ m β]
|
||||
{it₂ : IterM (α := α₂) m β} :
|
||||
(IterM.Intermediate.appendSnd α₁ it₂).step = (do
|
||||
match (← it₂.step).inflate with
|
||||
| .yield it₂' out h =>
|
||||
pure <| .deflate <| .yield (IterM.Intermediate.appendSnd α₁ it₂') out (.sndYield h)
|
||||
| .skip it₂' h =>
|
||||
pure <| .deflate <| .skip (IterM.Intermediate.appendSnd α₁ it₂') (.sndSkip h)
|
||||
| .done h =>
|
||||
pure <| .deflate <| .done (.sndDone h)) := by
|
||||
simp only [Intermediate.appendSnd, step, Iterator.step]
|
||||
apply bind_congr; intro step
|
||||
cases step.inflate using PlausibleIterStep.casesOn <;> rfl
|
||||
|
||||
@[simp]
|
||||
theorem IterM.toList_appendSnd [Monad m] [LawfulMonad m]
|
||||
[Iterator α₁ m β] [Iterator α₂ m β] [Finite α₁ m] [Finite α₂ m]
|
||||
{it₂ : IterM (α := α₂) m β} :
|
||||
(IterM.Intermediate.appendSnd α₁ it₂).toList = it₂.toList := by
|
||||
induction it₂ using IterM.inductSteps with | step it₂ ihy ihs
|
||||
rw [toList_eq_match_step (it := IterM.Intermediate.appendSnd α₁ it₂),
|
||||
toList_eq_match_step (it := it₂)]
|
||||
simp only [Intermediate.step_appendSnd, bind_assoc]
|
||||
apply bind_congr; intro step
|
||||
cases step.inflate using PlausibleIterStep.casesOn
|
||||
· simp [ihy ‹_›]
|
||||
· simp [ihs ‹_›]
|
||||
· simp
|
||||
|
||||
@[simp]
|
||||
theorem IterM.toList_append [Monad m] [LawfulMonad m]
|
||||
[Iterator α₁ m β] [Iterator α₂ m β] [Finite α₁ m] [Finite α₂ m]
|
||||
{it₁ : IterM (α := α₁) m β} {it₂ : IterM (α := α₂) m β} :
|
||||
(it₁.append it₂).toList = (do
|
||||
let l₁ ← it₁.toList
|
||||
let l₂ ← it₂.toList
|
||||
pure (l₁ ++ l₂)) := by
|
||||
induction it₁ using IterM.inductSteps with | step it₁ ihy ihs
|
||||
rw [toList_eq_match_step (it := it₁.append it₂), toList_eq_match_step (it := it₁)]
|
||||
simp only [step_append, bind_assoc]
|
||||
apply bind_congr; intro step
|
||||
cases step.inflate using PlausibleIterStep.casesOn
|
||||
· simp [ihy ‹_›, - bind_pure_comp]
|
||||
· simp [ihs ‹_›]
|
||||
· simp [toList_appendSnd, - bind_pure_comp]
|
||||
|
||||
@[simp]
|
||||
theorem IterM.toListRev_append [Monad m] [LawfulMonad m]
|
||||
[Iterator α₁ m β] [Iterator α₂ m β] [Finite α₁ m] [Finite α₂ m]
|
||||
{it₁ : IterM (α := α₁) m β} {it₂ : IterM (α := α₂) m β} :
|
||||
(it₁.append it₂).toListRev = (do
|
||||
let l₁ ← it₁.toListRev
|
||||
let l₂ ← it₂.toListRev
|
||||
pure (l₂ ++ l₁)) := by
|
||||
rw [toListRev_eq (it := it₁.append it₂), toList_append,
|
||||
toListRev_eq (it := it₁), toListRev_eq (it := it₂)]
|
||||
simp [map_bind, bind_pure_comp, List.reverse_append]
|
||||
|
||||
@[simp]
|
||||
theorem IterM.toArray_append [Monad m] [LawfulMonad m]
|
||||
[Iterator α₁ m β] [Iterator α₂ m β] [Finite α₁ m] [Finite α₂ m]
|
||||
{it₁ : IterM (α := α₁) m β} {it₂ : IterM (α := α₂) m β} :
|
||||
(it₁.append it₂).toArray = (do
|
||||
let a₁ ← it₁.toArray
|
||||
let a₂ ← it₂.toArray
|
||||
pure (a₁ ++ a₂)) := by
|
||||
rw [← toArray_toList (it := it₁.append it₂), toList_append,
|
||||
← toArray_toList (it := it₁), ← toArray_toList (it := it₂)]
|
||||
simp [map_bind, - bind_pure_comp, ← List.toArray_appendList, - toArray_toList]
|
||||
|
||||
end Std
|
||||
@@ -374,7 +374,6 @@ theorem IterM.toList_map_eq_toList_filterMapM {α β γ : Type w} {m : Type w
|
||||
simp [toList_map_eq_toList_mapM, toList_mapM_eq_toList_filterMapM]
|
||||
congr <;> simp
|
||||
|
||||
set_option backward.whnf.reducibleClassField false in
|
||||
/--
|
||||
Variant of `toList_filterMapWithPostcondition_filterMapWithPostcondition` that is intended to be
|
||||
used with the `apply` tactic. Because neither the LHS nor the RHS determine all implicit parameters,
|
||||
@@ -395,7 +394,7 @@ private theorem IterM.toList_filterMapWithPostcondition_filterMapWithPostconditi
|
||||
(it.filterMapWithPostcondition (n := o) fg).toList := by
|
||||
induction it using IterM.inductSteps with | step it ihy ihs
|
||||
letI : MonadLift n o := ⟨monadLift⟩
|
||||
haveI : LawfulMonadLift n o := ⟨by simp +instances [this], by simp +instances [this]⟩
|
||||
haveI : LawfulMonadLift n o := ⟨LawfulMonadLiftT.monadLift_pure, LawfulMonadLiftT.monadLift_bind⟩
|
||||
rw [toList_eq_match_step, toList_eq_match_step, step_filterMapWithPostcondition,
|
||||
bind_assoc, step_filterMapWithPostcondition, step_filterMapWithPostcondition]
|
||||
simp only [bind_assoc, liftM_bind]
|
||||
@@ -602,7 +601,6 @@ theorem IterM.toList_map_mapM {α β γ δ : Type w}
|
||||
toList_filterMapM_mapM]
|
||||
congr <;> simp
|
||||
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
@[simp]
|
||||
theorem IterM.toList_filterMapWithPostcondition {α β γ : Type w} {m : Type w → Type w'}
|
||||
[Monad m] [LawfulMonad m]
|
||||
@@ -626,7 +624,6 @@ theorem IterM.toList_filterMapWithPostcondition {α β γ : Type w} {m : Type w
|
||||
· simp [ihs ‹_›, heq]
|
||||
· simp [heq]
|
||||
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
@[simp]
|
||||
theorem IterM.toList_mapWithPostcondition {α β γ : Type w} {m : Type w → Type w'}
|
||||
[Monad m] [LawfulMonad m] [Iterator α Id β] [Finite α Id]
|
||||
@@ -647,25 +644,25 @@ theorem IterM.toList_mapWithPostcondition {α β γ : Type w} {m : Type w → Ty
|
||||
· simp [ihs ‹_›, heq]
|
||||
· simp [heq]
|
||||
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
@[simp]
|
||||
theorem IterM.toList_filterMapM {α β γ : Type w} {m : Type w → Type w'}
|
||||
[Monad m] [MonadAttach m] [LawfulMonad m] [WeaklyLawfulMonadAttach m]
|
||||
[Iterator α Id β] [Finite α Id]
|
||||
{f : β → m (Option γ)} (it : IterM (α := α) Id β) :
|
||||
(it.filterMapM f).toList = it.toList.run.filterMapM f := by
|
||||
simp [toList_filterMapM_eq_toList_filterMapWithPostcondition, toList_filterMapWithPostcondition,
|
||||
PostconditionT.attachLift, PostconditionT.run_eq_map, WeaklyLawfulMonadAttach.map_attach]
|
||||
simp only [toList_filterMapM_eq_toList_filterMapWithPostcondition,
|
||||
toList_filterMapWithPostcondition, PostconditionT.run_eq_map]
|
||||
simp [PostconditionT.attachLift, WeaklyLawfulMonadAttach.map_attach]
|
||||
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
@[simp]
|
||||
theorem IterM.toList_mapM {α β γ : Type w} {m : Type w → Type w'}
|
||||
[Monad m] [MonadAttach m] [LawfulMonad m] [WeaklyLawfulMonadAttach m]
|
||||
[Iterator α Id β] [Finite α Id]
|
||||
{f : β → m γ} (it : IterM (α := α) Id β) :
|
||||
(it.mapM f).toList = it.toList.run.mapM f := by
|
||||
simp [toList_mapM_eq_toList_mapWithPostcondition, toList_mapWithPostcondition,
|
||||
PostconditionT.attachLift, PostconditionT.run_eq_map, WeaklyLawfulMonadAttach.map_attach]
|
||||
simp only [toList_mapM_eq_toList_mapWithPostcondition, toList_mapWithPostcondition,
|
||||
PostconditionT.run_eq_map]
|
||||
simp [PostconditionT.attachLift, WeaklyLawfulMonadAttach.map_attach]
|
||||
|
||||
@[simp]
|
||||
theorem IterM.toList_filterMap {α β γ : Type w} {m : Type w → Type w'}
|
||||
@@ -1303,7 +1300,6 @@ theorem IterM.forIn_filterMap
|
||||
rw [filterMap, forIn_filterMapWithPostcondition]
|
||||
simp [PostconditionT.run_eq_map]
|
||||
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
theorem IterM.forIn_mapWithPostcondition
|
||||
[Monad m] [LawfulMonad m] [Monad n] [LawfulMonad n] [Monad o] [LawfulMonad o]
|
||||
[MonadLiftT m n] [LawfulMonadLiftT m n] [MonadLiftT n o] [LawfulMonadLiftT n o]
|
||||
@@ -1314,9 +1310,9 @@ theorem IterM.forIn_mapWithPostcondition
|
||||
haveI : MonadLift n o := ⟨monadLift⟩
|
||||
forIn (it.mapWithPostcondition f) init g =
|
||||
forIn it init (fun out acc => do g (← (f out).run) acc) := by
|
||||
rw [mapWithPostcondition, InternalCombinators.map, ← InternalCombinators.filterMap,
|
||||
← filterMapWithPostcondition, forIn_filterMapWithPostcondition]
|
||||
simp [PostconditionT.run_eq_map]
|
||||
unfold mapWithPostcondition InternalCombinators.map Map.instIterator Map.instIteratorLoop Map
|
||||
rw [← InternalCombinators.filterMap, ← filterMapWithPostcondition, forIn_filterMapWithPostcondition]
|
||||
simp
|
||||
|
||||
theorem IterM.forIn_mapM
|
||||
[Monad m] [LawfulMonad m] [Monad n] [LawfulMonad n] [Monad o] [LawfulMonad o]
|
||||
@@ -1480,7 +1476,7 @@ theorem IterM.foldM_filterM {α β δ : Type w}
|
||||
simp [filterM, foldM_filterMapWithPostcondition, PostconditionT.run_attachLift]
|
||||
congr 1; ext out acc
|
||||
apply bind_congr; intro fx
|
||||
cases fx.down <;> simp [PostconditionT.run_eq_map]
|
||||
cases fx.down <;> simp
|
||||
|
||||
theorem IterM.foldM_filterMap {α β γ δ : Type w} {m : Type w → Type w'} {n : Type w → Type w''}
|
||||
[Iterator α m β] [Finite α m] [Monad m] [Monad n] [LawfulMonad m] [LawfulMonad n]
|
||||
|
||||
@@ -21,14 +21,14 @@ open Std.Internal Std.Iterators
|
||||
theorem IterM.step_flattenAfter {α α₂ β : Type w} {m : Type w → Type w'} [Monad m]
|
||||
[Iterator α m (IterM (α := α₂) m β)] [Iterator α₂ m β]
|
||||
{it₁ : IterM (α := α) m (IterM (α := α₂) m β)} {it₂ : Option (IterM (α := α₂) m β)} :
|
||||
(it₁.flattenAfter it₂).step = (do
|
||||
(it₁.flattenAfter it₂).step = (
|
||||
match it₂ with
|
||||
| none =>
|
||||
| none => do
|
||||
match (← it₁.step).inflate with
|
||||
| .yield it₁' it₂' h => return .deflate (.skip (it₁'.flattenAfter (some it₂')) (.outerYield h))
|
||||
| .skip it₁' h => return .deflate (.skip (it₁'.flattenAfter none) (.outerSkip h))
|
||||
| .done h => return .deflate (.done (.outerDone h))
|
||||
| some it₂ =>
|
||||
| some it₂ => do
|
||||
match (← it₂.step).inflate with
|
||||
| .yield it₂' out h => return .deflate (.yield (it₁.flattenAfter (some it₂')) out (.innerYield h))
|
||||
| .skip it₂' h => return .deflate (.skip (it₁.flattenAfter (some it₂')) (.innerSkip h))
|
||||
@@ -130,16 +130,16 @@ public theorem IterM.step_flatMapAfterM {α : Type w} {β : Type w} {α₂ : Typ
|
||||
{γ : Type w} {m : Type w → Type w'} [Monad m] [MonadAttach m] [LawfulMonad m] [WeaklyLawfulMonadAttach m]
|
||||
[Iterator α m β] [Iterator α₂ m γ] {f : β → m (IterM (α := α₂) m γ)} {it₁ : IterM (α := α) m β}
|
||||
{it₂ : Option (IterM (α := α₂) m γ)} :
|
||||
(it₁.flatMapAfterM f it₂).step = (do
|
||||
(it₁.flatMapAfterM f it₂).step = (
|
||||
match it₂ with
|
||||
| none =>
|
||||
| none => do
|
||||
match (← it₁.step).inflate with
|
||||
| .yield it₁' b h =>
|
||||
let fx ← MonadAttach.attach (f b)
|
||||
return .deflate (.skip (it₁'.flatMapAfterM f (some fx.val)) (.outerYield_flatMapM h fx.property))
|
||||
| .skip it₁' h => return .deflate (.skip (it₁'.flatMapAfterM f none) (.outerSkip_flatMapM h))
|
||||
| .done h => return .deflate (.done (.outerDone_flatMapM h))
|
||||
| some it₂ =>
|
||||
| some it₂ => do
|
||||
match (← it₂.step).inflate with
|
||||
| .yield it₂' out h => return .deflate (.yield (it₁.flatMapAfterM f (some it₂')) out (.innerYield_flatMapM h))
|
||||
| .skip it₂' h => return .deflate (.skip (it₁.flatMapAfterM f (some it₂')) (.innerSkip_flatMapM h))
|
||||
@@ -171,15 +171,15 @@ public theorem IterM.step_flatMapM {α : Type w} {β : Type w} {α₂ : Type w}
|
||||
public theorem IterM.step_flatMapAfter {α : Type w} {β : Type w} {α₂ : Type w}
|
||||
{γ : Type w} {m : Type w → Type w'} [Monad m] [LawfulMonad m] [Iterator α m β] [Iterator α₂ m γ]
|
||||
{f : β → IterM (α := α₂) m γ} {it₁ : IterM (α := α) m β} {it₂ : Option (IterM (α := α₂) m γ)} :
|
||||
(it₁.flatMapAfter f it₂).step = (do
|
||||
(it₁.flatMapAfter f it₂).step = (
|
||||
match it₂ with
|
||||
| none =>
|
||||
| none => do
|
||||
match (← it₁.step).inflate with
|
||||
| .yield it₁' b h =>
|
||||
return .deflate (.skip (it₁'.flatMapAfter f (some (f b))) (.outerYield_flatMap h))
|
||||
| .skip it₁' h => return .deflate (.skip (it₁'.flatMapAfter f none) (.outerSkip_flatMap h))
|
||||
| .done h => return .deflate (.done (.outerDone_flatMap h))
|
||||
| some it₂ =>
|
||||
| some it₂ => do
|
||||
match (← it₂.step).inflate with
|
||||
| .yield it₂' out h => return .deflate (.yield (it₁.flatMapAfter f (some it₂')) out (.innerYield_flatMap h))
|
||||
| .skip it₂' h => return .deflate (.skip (it₁.flatMapAfter f (some it₂')) (.innerSkip_flatMap h))
|
||||
|
||||
@@ -32,11 +32,12 @@ theorem Iter.forIn'_eq {α β : Type w} [Iterator α Id β] [Finite α Id]
|
||||
IterM.DefaultConsumers.forIn' (n := m) (fun _ _ f x => f x.run) γ (fun _ _ _ => True)
|
||||
it.toIterM init _ (fun _ => id)
|
||||
(fun out h acc => return ⟨← f out (Iter.isPlausibleIndirectOutput_iff_isPlausibleIndirectOutput_toIterM.mpr h) acc, trivial⟩) := by
|
||||
simp +instances only [instForIn', ForIn'.forIn', IteratorLoop.finiteForIn']
|
||||
simp only [ForIn'.forIn']
|
||||
have : ∀ a b c, f a b c = (Subtype.val <$> (⟨·, trivial⟩) <$> f a b c) := by simp
|
||||
simp +singlePass only [this]
|
||||
rw [hl.lawful (fun _ _ f x => f x.run) (wf := IteratorLoop.wellFounded_of_finite)]
|
||||
simp +instances [IteratorLoop.defaultImplementation]
|
||||
simp only [IteratorLoop.forIn, Functor.map_map, id_map',
|
||||
bind_pure_comp]
|
||||
|
||||
theorem Iter.forIn_eq {α β : Type w} [Iterator α Id β] [Finite α Id]
|
||||
{m : Type x → Type x'} [Monad m] [LawfulMonad m] [IteratorLoop α Id m]
|
||||
@@ -81,7 +82,7 @@ theorem Iter.forIn'_eq_forIn'_toIterM {α β : Type w} [Iterator α Id β]
|
||||
letI : ForIn' m (IterM (α := α) Id β) β _ := IterM.instForIn'
|
||||
ForIn'.forIn' it.toIterM init
|
||||
(fun out h acc => f out (isPlausibleIndirectOutput_iff_isPlausibleIndirectOutput_toIterM.mpr h) acc) := by
|
||||
simp +instances [ForIn'.forIn', Iter.instForIn', IterM.instForIn', monadLift]
|
||||
simp [ForIn'.forIn', monadLift]
|
||||
|
||||
theorem Iter.forIn_eq_forIn_toIterM {α β : Type w} [Iterator α Id β]
|
||||
[Finite α Id] {m : Type w → Type w''} [Monad m] [LawfulMonad m]
|
||||
@@ -395,7 +396,7 @@ theorem Iter.fold_eq_fold_toIterM {α β : Type w} {γ : Type w} [Iterator α Id
|
||||
[Finite α Id] [IteratorLoop α Id Id]
|
||||
{f : γ → β → γ} {init : γ} {it : Iter (α := α) β} :
|
||||
it.fold (init := init) f = (it.toIterM.fold (init := init) f).run := by
|
||||
rw [fold_eq_foldM, foldM_eq_foldM_toIterM, IterM.fold_eq_foldM]; rfl
|
||||
rw [fold_eq_foldM, foldM_eq_foldM_toIterM, IterM.fold_eq_foldM]
|
||||
|
||||
@[simp]
|
||||
theorem Iter.forIn_pure_yield_eq_fold {α β : Type w} {γ : Type x} [Iterator α Id β]
|
||||
|
||||
@@ -109,10 +109,10 @@ theorem IterM.forIn'_eq {α β : Type w} {m : Type w → Type w'} [Iterator α m
|
||||
letI : ForIn' n (IterM (α := α) m β) β _ := IterM.instForIn'
|
||||
ForIn'.forIn' (α := β) (m := n) it init f = IterM.DefaultConsumers.forIn' (n := n)
|
||||
(fun _ _ f x => monadLift x >>= f) γ (fun _ _ _ => True) it init _ (fun _ => id) (return ⟨← f · · ·, trivial⟩) := by
|
||||
simp +instances only [instForIn', ForIn'.forIn', IteratorLoop.finiteForIn']
|
||||
simp only [ForIn'.forIn']
|
||||
have : f = (Subtype.val <$> (⟨·, trivial⟩) <$> f · · ·) := by simp
|
||||
rw [this, hl.lawful (fun _ _ f x => monadLift x >>= f) (wf := IteratorLoop.wellFounded_of_finite)]
|
||||
simp +instances [IteratorLoop.defaultImplementation]
|
||||
simp [IteratorLoop.forIn]
|
||||
try rfl
|
||||
|
||||
theorem IterM.forIn_eq {α β : Type w} {m : Type w → Type w'} [Iterator α m β] [Finite α m]
|
||||
|
||||
@@ -272,6 +272,12 @@ theorem PostconditionT.run_bind' {m : Type w → Type w'} [Monad m] [LawfulMonad
|
||||
(x >>= f).run = x.run >>= (f · |>.run) :=
|
||||
run_bind
|
||||
|
||||
@[simp]
|
||||
protected theorem PostconditionT.run_pure {m : Type w → Type w'} [Monad m] [LawfulMonad m]
|
||||
{α : Type w} {x : α} :
|
||||
(pure x : PostconditionT m α).run = pure x := by
|
||||
simp [run_eq_map]
|
||||
|
||||
@[simp]
|
||||
theorem PostconditionT.property_lift {m : Type w → Type w'} [Functor m] {α : Type w}
|
||||
{x : m α} : (lift x : PostconditionT m α).Property = (fun _ => True) := by
|
||||
|
||||
@@ -32,14 +32,14 @@ def ToIterator.iter [ToIterator γ Id α β] (x : γ) : Iter (α := α) β :=
|
||||
ToIterator.iterM x |>.toIter
|
||||
|
||||
/-- Creates a monadic `ToIterator` instance. -/
|
||||
@[always_inline, inline, expose, instance_reducible]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def ToIterator.ofM (α : Type w)
|
||||
(iterM : γ → IterM (α := α) m β) :
|
||||
ToIterator γ m α β where
|
||||
iterMInternal x := iterM x
|
||||
|
||||
/-- Creates a pure `ToIterator` instance. -/
|
||||
@[always_inline, inline, expose, instance_reducible]
|
||||
@[always_inline, inline, expose, implicit_reducible]
|
||||
def ToIterator.of (α : Type w)
|
||||
(iter : γ → Iter (α := α) β) :
|
||||
ToIterator γ Id α β where
|
||||
|
||||
@@ -37,3 +37,4 @@ public import Init.Data.List.Lex
|
||||
public import Init.Data.List.Range
|
||||
public import Init.Data.List.Scan
|
||||
public import Init.Data.List.ControlImpl
|
||||
public import Init.Data.List.SplitOn
|
||||
|
||||
@@ -135,7 +135,11 @@ protected def beq [BEq α] : List α → List α → Bool
|
||||
@[simp] theorem beq_nil_nil [BEq α] : List.beq ([] : List α) ([] : List α) = true := rfl
|
||||
@[simp] theorem beq_cons_nil [BEq α] {a : α} {as : List α} : List.beq (a::as) [] = false := rfl
|
||||
@[simp] theorem beq_nil_cons [BEq α] {a : α} {as : List α} : List.beq [] (a::as) = false := rfl
|
||||
theorem beq_cons₂ [BEq α] {a b : α} {as bs : List α} : List.beq (a::as) (b::bs) = (a == b && List.beq as bs) := rfl
|
||||
theorem beq_cons_cons [BEq α] {a b : α} {as bs : List α} : List.beq (a::as) (b::bs) = (a == b && List.beq as bs) := rfl
|
||||
|
||||
@[deprecated beq_cons_cons (since := "2026-02-26")]
|
||||
theorem beq_cons₂ [BEq α] {a b : α} {as bs : List α} :
|
||||
List.beq (a::as) (b::bs) = (a == b && List.beq as bs) := beq_cons_cons
|
||||
|
||||
instance [BEq α] : BEq (List α) := ⟨List.beq⟩
|
||||
|
||||
@@ -175,7 +179,10 @@ Examples:
|
||||
@[simp, grind =] theorem isEqv_nil_nil : isEqv ([] : List α) [] eqv = true := rfl
|
||||
@[simp, grind =] theorem isEqv_nil_cons : isEqv ([] : List α) (a::as) eqv = false := rfl
|
||||
@[simp, grind =] theorem isEqv_cons_nil : isEqv (a::as : List α) [] eqv = false := rfl
|
||||
@[grind =] theorem isEqv_cons₂ : isEqv (a::as) (b::bs) eqv = (eqv a b && isEqv as bs eqv) := rfl
|
||||
@[grind =] theorem isEqv_cons_cons : isEqv (a::as) (b::bs) eqv = (eqv a b && isEqv as bs eqv) := rfl
|
||||
|
||||
@[deprecated isEqv_cons_cons (since := "2026-02-26")]
|
||||
theorem isEqv_cons₂ : isEqv (a::as) (b::bs) eqv = (eqv a b && isEqv as bs eqv) := isEqv_cons_cons
|
||||
|
||||
|
||||
/-! ## Lexicographic ordering -/
|
||||
@@ -1048,9 +1055,12 @@ def dropLast {α} : List α → List α
|
||||
@[simp, grind =] theorem dropLast_nil : ([] : List α).dropLast = [] := rfl
|
||||
@[simp, grind =] theorem dropLast_singleton : [x].dropLast = [] := rfl
|
||||
|
||||
@[simp, grind =] theorem dropLast_cons₂ :
|
||||
@[simp, grind =] theorem dropLast_cons_cons :
|
||||
(x::y::zs).dropLast = x :: (y::zs).dropLast := rfl
|
||||
|
||||
@[deprecated dropLast_cons_cons (since := "2026-02-26")]
|
||||
theorem dropLast_cons₂ : (x::y::zs).dropLast = x :: (y::zs).dropLast := dropLast_cons_cons
|
||||
|
||||
-- Later this can be proved by `simp` via `[List.length_dropLast, List.length_cons, Nat.add_sub_cancel]`,
|
||||
-- but we need this while bootstrapping `Array`.
|
||||
@[simp] theorem length_dropLast_cons {a : α} {as : List α} : (a :: as).dropLast.length = as.length := by
|
||||
@@ -1085,7 +1095,11 @@ inductive Sublist {α} : List α → List α → Prop
|
||||
/-- If `l₁` is a subsequence of `l₂`, then it is also a subsequence of `a :: l₂`. -/
|
||||
| cons a : Sublist l₁ l₂ → Sublist l₁ (a :: l₂)
|
||||
/-- If `l₁` is a subsequence of `l₂`, then `a :: l₁` is a subsequence of `a :: l₂`. -/
|
||||
| cons₂ a : Sublist l₁ l₂ → Sublist (a :: l₁) (a :: l₂)
|
||||
| cons_cons a : Sublist l₁ l₂ → Sublist (a :: l₁) (a :: l₂)
|
||||
|
||||
set_option linter.missingDocs false in
|
||||
@[deprecated Sublist.cons_cons (since := "2026-02-26"), match_pattern]
|
||||
abbrev Sublist.cons₂ := @Sublist.cons_cons
|
||||
|
||||
@[inherit_doc] scoped infixl:50 " <+ " => Sublist
|
||||
|
||||
@@ -1143,9 +1157,13 @@ def isPrefixOf [BEq α] : List α → List α → Bool
|
||||
@[simp, grind =] theorem isPrefixOf_nil_left [BEq α] : isPrefixOf ([] : List α) l = true := by
|
||||
simp [isPrefixOf]
|
||||
@[simp, grind =] theorem isPrefixOf_cons_nil [BEq α] : isPrefixOf (a::as) ([] : List α) = false := rfl
|
||||
@[grind =] theorem isPrefixOf_cons₂ [BEq α] {a : α} :
|
||||
@[grind =] theorem isPrefixOf_cons_cons [BEq α] {a : α} :
|
||||
isPrefixOf (a::as) (b::bs) = (a == b && isPrefixOf as bs) := rfl
|
||||
|
||||
@[deprecated isPrefixOf_cons_cons (since := "2026-02-26")]
|
||||
theorem isPrefixOf_cons₂ [BEq α] {a : α} :
|
||||
isPrefixOf (a::as) (b::bs) = (a == b && isPrefixOf as bs) := isPrefixOf_cons_cons
|
||||
|
||||
/--
|
||||
If the first list is a prefix of the second, returns the result of dropping the prefix.
|
||||
|
||||
@@ -2164,10 +2182,16 @@ def intersperse (sep : α) : (l : List α) → List α
|
||||
| x::xs => x :: sep :: intersperse sep xs
|
||||
|
||||
@[simp] theorem intersperse_nil {sep : α} : ([] : List α).intersperse sep = [] := rfl
|
||||
@[simp] theorem intersperse_single {x : α} {sep : α} : [x].intersperse sep = [x] := rfl
|
||||
@[simp] theorem intersperse_cons₂ {x : α} {y : α} {zs : List α} {sep : α} :
|
||||
@[simp] theorem intersperse_singleton {x : α} {sep : α} : [x].intersperse sep = [x] := rfl
|
||||
@[deprecated intersperse_singleton (since := "2026-02-26")]
|
||||
theorem intersperse_single {x : α} {sep : α} : [x].intersperse sep = [x] := rfl
|
||||
@[simp] theorem intersperse_cons_cons {x : α} {y : α} {zs : List α} {sep : α} :
|
||||
(x::y::zs).intersperse sep = x::sep::((y::zs).intersperse sep) := rfl
|
||||
|
||||
@[deprecated intersperse_cons_cons (since := "2026-02-26")]
|
||||
theorem intersperse_cons₂ {x : α} {y : α} {zs : List α} {sep : α} :
|
||||
(x::y::zs).intersperse sep = x::sep::((y::zs).intersperse sep) := intersperse_cons_cons
|
||||
|
||||
/-! ### intercalate -/
|
||||
|
||||
set_option linter.listVariables false in
|
||||
|
||||
@@ -125,7 +125,7 @@ protected theorem Sublist.eraseP : l₁ <+ l₂ → l₁.eraseP p <+ l₂.eraseP
|
||||
by_cases h : p a
|
||||
· simpa [h] using s.eraseP.trans eraseP_sublist
|
||||
· simpa [h] using s.eraseP.cons _
|
||||
| .cons₂ a s => by
|
||||
| .cons_cons a s => by
|
||||
by_cases h : p a
|
||||
· simpa [h] using s
|
||||
· simpa [h] using s.eraseP
|
||||
|
||||
@@ -184,7 +184,7 @@ theorem Sublist.findSome?_isSome {l₁ l₂ : List α} (h : l₁ <+ l₂) :
|
||||
induction h with
|
||||
| slnil => simp
|
||||
| cons a h ih
|
||||
| cons₂ a h ih =>
|
||||
| cons_cons a h ih =>
|
||||
simp only [findSome?]
|
||||
split
|
||||
· simp_all
|
||||
@@ -455,7 +455,7 @@ theorem Sublist.find?_isSome {l₁ l₂ : List α} (h : l₁ <+ l₂) : (l₁.fi
|
||||
induction h with
|
||||
| slnil => simp
|
||||
| cons a h ih
|
||||
| cons₂ a h ih =>
|
||||
| cons_cons a h ih =>
|
||||
simp only [find?]
|
||||
split
|
||||
· simp
|
||||
|
||||
@@ -236,7 +236,6 @@ theorem getElem?_eq_some_iff {l : List α} : l[i]? = some a ↔ ∃ h : i < l.le
|
||||
· match i, h with
|
||||
| i + 1, h => simp [getElem?_eq_some_iff, Nat.succ_lt_succ_iff]
|
||||
|
||||
@[grind →]
|
||||
theorem getElem_of_getElem? {l : List α} : l[i]? = some a → ∃ h : i < l.length, l[i] = a :=
|
||||
getElem?_eq_some_iff.mp
|
||||
|
||||
@@ -937,6 +936,12 @@ theorem getElem_zero_eq_head {l : List α} (h : 0 < l.length) :
|
||||
| nil => simp at h
|
||||
| cons _ _ => simp
|
||||
|
||||
theorem head!_eq_getElem! [Inhabited α] {l : List α} : head! l = l[0]! := by
|
||||
cases l <;> rfl
|
||||
|
||||
theorem headD_eq_getD {l : List α} {fallback} : headD l fallback = l.getD 0 fallback := by
|
||||
cases l <;> rfl
|
||||
|
||||
theorem head_eq_iff_head?_eq_some {xs : List α} (h) : xs.head h = a ↔ xs.head? = some a := by
|
||||
cases xs with
|
||||
| nil => simp at h
|
||||
@@ -1394,7 +1399,7 @@ theorem head_filter_of_pos {p : α → Bool} {l : List α} (w : l ≠ []) (h : p
|
||||
|
||||
@[simp] theorem filter_sublist {p : α → Bool} : ∀ {l : List α}, filter p l <+ l
|
||||
| [] => .slnil
|
||||
| a :: l => by rw [filter]; split <;> simp [Sublist.cons, Sublist.cons₂, filter_sublist]
|
||||
| a :: l => by rw [filter]; split <;> simp [Sublist.cons, Sublist.cons_cons, filter_sublist]
|
||||
|
||||
/-! ### filterMap -/
|
||||
|
||||
@@ -3154,7 +3159,7 @@ theorem dropLast_concat_getLast : ∀ {l : List α} (h : l ≠ []), dropLast l +
|
||||
| [], h => absurd rfl h
|
||||
| [_], _ => rfl
|
||||
| _ :: b :: l, _ => by
|
||||
rw [dropLast_cons₂, cons_append, getLast_cons (cons_ne_nil _ _)]
|
||||
rw [dropLast_cons_cons, cons_append, getLast_cons (cons_ne_nil _ _)]
|
||||
congr
|
||||
exact dropLast_concat_getLast (cons_ne_nil b l)
|
||||
|
||||
@@ -3774,4 +3779,28 @@ theorem get_mem : ∀ (l : List α) n, get l n ∈ l
|
||||
theorem mem_iff_get {a} {l : List α} : a ∈ l ↔ ∃ n, get l n = a :=
|
||||
⟨get_of_mem, fun ⟨_, e⟩ => e ▸ get_mem ..⟩
|
||||
|
||||
/-! ### `intercalate` -/
|
||||
|
||||
@[simp]
|
||||
theorem intercalate_nil {ys : List α} : ys.intercalate [] = [] := rfl
|
||||
|
||||
@[simp]
|
||||
theorem intercalate_singleton {ys xs : List α} : ys.intercalate [xs] = xs := by
|
||||
simp [intercalate]
|
||||
|
||||
@[simp]
|
||||
theorem intercalate_cons_cons {ys l l' : List α} {zs : List (List α)} :
|
||||
ys.intercalate (l :: l' :: zs) = l ++ ys ++ ys.intercalate (l' :: zs) := by
|
||||
simp [intercalate]
|
||||
|
||||
@[simp]
|
||||
theorem intercalate_cons_cons_left {ys l : List α} {x : α} {zs : List (List α)} :
|
||||
ys.intercalate ((x :: l) :: zs) = x :: ys.intercalate (l :: zs) := by
|
||||
cases zs <;> simp
|
||||
|
||||
theorem intercalate_cons_of_ne_nil {ys l : List α} {zs : List (List α)} (h : zs ≠ []) :
|
||||
ys.intercalate (l :: zs) = l ++ ys ++ ys.intercalate zs :=
|
||||
match zs, h with
|
||||
| l'::zs, _ => by simp
|
||||
|
||||
end List
|
||||
|
||||
@@ -42,7 +42,7 @@ theorem beq_eq_isEqv [BEq α] {as bs : List α} : as.beq bs = isEqv as bs (· ==
|
||||
cases bs with
|
||||
| nil => simp
|
||||
| cons b bs =>
|
||||
simp only [beq_cons₂, ih, isEqv_eq_decide, length_cons, Nat.add_right_cancel_iff,
|
||||
simp only [beq_cons_cons, ih, isEqv_eq_decide, length_cons, Nat.add_right_cancel_iff,
|
||||
Nat.forall_lt_succ_left', getElem_cons_zero, getElem_cons_succ, Bool.decide_and,
|
||||
Bool.decide_eq_true]
|
||||
split <;> simp
|
||||
|
||||
@@ -106,7 +106,7 @@ theorem Sublist.le_countP (s : l₁ <+ l₂) (p) : countP p l₂ - (l₂.length
|
||||
have := s.le_countP p
|
||||
have := s.length_le
|
||||
split <;> omega
|
||||
| .cons₂ a s =>
|
||||
| .cons_cons a s =>
|
||||
rename_i l₁ l₂
|
||||
simp only [countP_cons, length_cons]
|
||||
have := s.le_countP p
|
||||
|
||||
@@ -38,7 +38,7 @@ theorem map_getElem_sublist {l : List α} {is : List (Fin l.length)} (h : is.Pai
|
||||
simp only [Fin.getElem_fin, map_cons]
|
||||
have := IH h.of_cons (hd+1) (pairwise_cons.mp h).1
|
||||
specialize his hd (.head _)
|
||||
have := (drop_eq_getElem_cons ..).symm ▸ this.cons₂ (get l hd)
|
||||
have := (drop_eq_getElem_cons ..).symm ▸ this.cons_cons (get l hd)
|
||||
have := Sublist.append (nil_sublist (take hd l |>.drop j)) this
|
||||
rwa [nil_append, ← (drop_append_of_le_length ?_), take_append_drop] at this
|
||||
simp [Nat.min_eq_left (Nat.le_of_lt hd.isLt), his]
|
||||
@@ -55,7 +55,7 @@ theorem sublist_eq_map_getElem {l l' : List α} (h : l' <+ l) : ∃ is : List (F
|
||||
refine ⟨is.map (·.succ), ?_⟩
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
simpa [Function.comp_def, pairwise_map]
|
||||
| cons₂ _ _ IH =>
|
||||
| cons_cons _ _ IH =>
|
||||
rcases IH with ⟨is,IH⟩
|
||||
refine ⟨⟨0, by simp [Nat.zero_lt_succ]⟩ :: is.map (·.succ), ?_⟩
|
||||
set_option backward.isDefEq.respectTransparency false in
|
||||
|
||||
@@ -207,7 +207,7 @@ theorem take_eq_dropLast {l : List α} {i : Nat} (h : i + 1 = l.length) :
|
||||
· cases as with
|
||||
| nil => simp_all
|
||||
| cons b bs =>
|
||||
simp only [take_succ_cons, dropLast_cons₂]
|
||||
simp only [take_succ_cons, dropLast_cons_cons]
|
||||
rw [ih]
|
||||
simpa using h
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ open Nat
|
||||
@[grind →] theorem Pairwise.sublist : l₁ <+ l₂ → l₂.Pairwise R → l₁.Pairwise R
|
||||
| .slnil, h => h
|
||||
| .cons _ s, .cons _ h₂ => h₂.sublist s
|
||||
| .cons₂ _ s, .cons h₁ h₂ => (h₂.sublist s).cons fun _ h => h₁ _ (s.subset h)
|
||||
| .cons_cons _ s, .cons h₁ h₂ => (h₂.sublist s).cons fun _ h => h₁ _ (s.subset h)
|
||||
|
||||
theorem Pairwise.imp {α R S} (H : ∀ {a b}, R a b → S a b) :
|
||||
∀ {l : List α}, l.Pairwise R → l.Pairwise S
|
||||
@@ -226,7 +226,7 @@ theorem pairwise_iff_forall_sublist : l.Pairwise R ↔ (∀ {a b}, [a,b] <+ l
|
||||
constructor <;> intro h
|
||||
· intro
|
||||
| a, b, .cons _ hab => exact IH.mp h.2 hab
|
||||
| _, b, .cons₂ _ hab => refine h.1 _ (hab.subset ?_); simp
|
||||
| _, b, .cons_cons _ hab => refine h.1 _ (hab.subset ?_); simp
|
||||
· constructor
|
||||
· intro x hx
|
||||
apply h
|
||||
@@ -304,26 +304,43 @@ grind_pattern Nodup.sublist => l₁ <+ l₂, Nodup l₂
|
||||
theorem Sublist.nodup : l₁ <+ l₂ → Nodup l₂ → Nodup l₁ :=
|
||||
Nodup.sublist
|
||||
|
||||
theorem getElem?_inj {xs : List α}
|
||||
(h₀ : i < xs.length) (h₁ : Nodup xs) (h₂ : xs[i]? = xs[j]?) : i = j := by
|
||||
induction xs generalizing i j with
|
||||
| nil => cases h₀
|
||||
| cons x xs ih =>
|
||||
match i, j with
|
||||
| 0, 0 => rfl
|
||||
| i+1, j+1 =>
|
||||
cases h₁ with
|
||||
| cons ha h₁ =>
|
||||
simp only [getElem?_cons_succ] at h₂
|
||||
exact congrArg (· + 1) (ih (Nat.lt_of_succ_lt_succ h₀) h₁ h₂)
|
||||
| i+1, 0 => ?_
|
||||
| 0, j+1 => ?_
|
||||
all_goals
|
||||
simp only [getElem?_cons_zero, getElem?_cons_succ] at h₂
|
||||
cases h₁; rename_i h' h
|
||||
have := h x ?_ rfl; cases this
|
||||
rw [mem_iff_getElem?]
|
||||
exact ⟨_, h₂⟩; exact ⟨_ , h₂.symm⟩
|
||||
theorem getElem?_inj {l : List α} (h₀ : i < l.length) (h₁ : List.Nodup l) :
|
||||
l[i]? = l[j]? ↔ i = j :=
|
||||
⟨by
|
||||
intro h₂
|
||||
induction l generalizing i j with
|
||||
| nil => cases h₀
|
||||
| cons x xs ih =>
|
||||
match i, j with
|
||||
| 0, 0 => rfl
|
||||
| i+1, j+1 =>
|
||||
cases h₁ with
|
||||
| cons ha h₁ =>
|
||||
simp only [getElem?_cons_succ] at h₂
|
||||
exact congrArg (· + 1) (ih (Nat.lt_of_succ_lt_succ h₀) h₁ h₂)
|
||||
| i+1, 0 => ?_
|
||||
| 0, j+1 => ?_
|
||||
all_goals
|
||||
simp only [getElem?_cons_zero, getElem?_cons_succ] at h₂
|
||||
cases h₁; rename_i h' h
|
||||
have := h x ?_ rfl; cases this
|
||||
rw [mem_iff_getElem?]
|
||||
exact ⟨_, h₂⟩; exact ⟨_ , h₂.symm⟩
|
||||
, by simp +contextual⟩
|
||||
|
||||
theorem getElem_inj {xs : List α}
|
||||
{h₀ : i < xs.length} {h₁ : j < xs.length} (h : Nodup xs) : xs[i] = xs[j] ↔ i = j := by
|
||||
simpa only [List.getElem_eq_getElem?_get, Option.get_inj] using getElem?_inj h₀ h
|
||||
|
||||
theorem getD_inj {xs : List α}
|
||||
(h₀ : i < xs.length) (h₁ : j < xs.length) (h₂ : Nodup xs) :
|
||||
xs.getD i fallback = xs.getD j fallback ↔ i = j := by
|
||||
simp only [List.getD_eq_getElem?_getD]
|
||||
rw [Option.getD_inj, getElem?_inj] <;> simpa
|
||||
|
||||
theorem getElem!_inj [Inhabited α] {xs : List α}
|
||||
(h₀ : i < xs.length) (h₁ : j < xs.length) (h₂ : Nodup xs) : xs[i]! = xs[j]! ↔ i = j := by
|
||||
simpa only [getElem!_eq_getElem?_getD, ← getD_eq_getElem?_getD] using getD_inj h₀ h₁ h₂
|
||||
|
||||
@[simp, grind =] theorem nodup_replicate {n : Nat} {a : α} :
|
||||
(replicate n a).Nodup ↔ n ≤ 1 := by simp [Nodup]
|
||||
|
||||
@@ -252,13 +252,13 @@ theorem exists_perm_sublist {l₁ l₂ l₂' : List α} (s : l₁ <+ l₂) (p :
|
||||
| cons x _ IH =>
|
||||
match s with
|
||||
| .cons _ s => let ⟨l₁', p', s'⟩ := IH s; exact ⟨l₁', p', s'.cons _⟩
|
||||
| .cons₂ _ s => let ⟨l₁', p', s'⟩ := IH s; exact ⟨x :: l₁', p'.cons x, s'.cons₂ _⟩
|
||||
| .cons_cons _ s => let ⟨l₁', p', s'⟩ := IH s; exact ⟨x :: l₁', p'.cons x, s'.cons_cons _⟩
|
||||
| swap x y l' =>
|
||||
match s with
|
||||
| .cons _ (.cons _ s) => exact ⟨_, .rfl, (s.cons _).cons _⟩
|
||||
| .cons _ (.cons₂ _ s) => exact ⟨x :: _, .rfl, (s.cons _).cons₂ _⟩
|
||||
| .cons₂ _ (.cons _ s) => exact ⟨y :: _, .rfl, (s.cons₂ _).cons _⟩
|
||||
| .cons₂ _ (.cons₂ _ s) => exact ⟨x :: y :: _, .swap .., (s.cons₂ _).cons₂ _⟩
|
||||
| .cons _ (.cons_cons _ s) => exact ⟨x :: _, .rfl, (s.cons _).cons_cons _⟩
|
||||
| .cons_cons _ (.cons _ s) => exact ⟨y :: _, .rfl, (s.cons_cons _).cons _⟩
|
||||
| .cons_cons _ (.cons_cons _ s) => exact ⟨x :: y :: _, .swap .., (s.cons_cons _).cons_cons _⟩
|
||||
| trans _ _ IH₁ IH₂ =>
|
||||
let ⟨_, pm, sm⟩ := IH₁ s
|
||||
let ⟨r₁, pr, sr⟩ := IH₂ sm
|
||||
@@ -277,7 +277,7 @@ theorem Sublist.exists_perm_append {l₁ l₂ : List α} : l₁ <+ l₂ → ∃
|
||||
| Sublist.cons a s =>
|
||||
let ⟨l, p⟩ := Sublist.exists_perm_append s
|
||||
⟨a :: l, (p.cons a).trans perm_middle.symm⟩
|
||||
| Sublist.cons₂ a s =>
|
||||
| Sublist.cons_cons a s =>
|
||||
let ⟨l, p⟩ := Sublist.exists_perm_append s
|
||||
⟨l, p.cons a⟩
|
||||
|
||||
|
||||
@@ -452,7 +452,7 @@ theorem sublist_mergeSort
|
||||
have h' := sublist_mergeSort trans total hc h
|
||||
rw [h₂] at h'
|
||||
exact h'.middle a
|
||||
| _, _, @Sublist.cons₂ _ l₁ l₂ a h => by
|
||||
| _, _, @Sublist.cons_cons _ l₁ l₂ a h => by
|
||||
rename_i hc
|
||||
obtain ⟨l₃, l₄, h₁, h₂, h₃⟩ := mergeSort_cons trans total a l₂
|
||||
rw [h₁]
|
||||
@@ -460,7 +460,7 @@ theorem sublist_mergeSort
|
||||
rw [h₂] at h'
|
||||
simp only [Bool.not_eq_true', tail_cons] at h₃ h'
|
||||
exact
|
||||
sublist_append_of_sublist_right (Sublist.cons₂ a
|
||||
sublist_append_of_sublist_right (Sublist.cons_cons a
|
||||
((fun w => Sublist.of_sublist_append_right w h') fun b m₁ m₃ =>
|
||||
(Bool.eq_not_self true).mp ((rel_of_pairwise_cons hc m₁).symm.trans (h₃ b m₃))))
|
||||
|
||||
|
||||
10
src/Init/Data/List/SplitOn.lean
Normal file
10
src/Init/Data/List/SplitOn.lean
Normal file
@@ -0,0 +1,10 @@
|
||||
/-
|
||||
Copyright (c) 2026 Lean FRO, LLC. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Markus Himmel
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.List.SplitOn.Basic
|
||||
public import Init.Data.List.SplitOn.Lemmas
|
||||
70
src/Init/Data/List/SplitOn/Basic.lean
Normal file
70
src/Init/Data/List/SplitOn/Basic.lean
Normal file
@@ -0,0 +1,70 @@
|
||||
/-
|
||||
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Leonardo de Moura
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.List.Basic
|
||||
public import Init.NotationExtra
|
||||
import Init.Data.Array.Bootstrap
|
||||
import Init.Data.List.Lemmas
|
||||
|
||||
public section
|
||||
|
||||
set_option doc.verso true
|
||||
|
||||
namespace List
|
||||
|
||||
/--
|
||||
Split a list at every element satisfying a predicate, and then prepend {lean}`acc.reverse` to the
|
||||
first element of the result.
|
||||
|
||||
* {lean}`[1, 1, 2, 3, 2, 4, 4].splitOnPPrepend (· == 2) [0, 5] = [[5, 0, 1, 1], [3], [4, 4]]`
|
||||
-/
|
||||
noncomputable def splitOnPPrepend (p : α → Bool) : (l : List α) → (acc : List α) → List (List α)
|
||||
| [], acc => [acc.reverse]
|
||||
| a :: t, acc => if p a then acc.reverse :: splitOnPPrepend p t [] else splitOnPPrepend p t (a::acc)
|
||||
|
||||
/--
|
||||
Split a list at every element satisfying a predicate. The separators are not in the result.
|
||||
|
||||
Examples:
|
||||
* {lean}`[1, 1, 2, 3, 2, 4, 4].splitOnP (· == 2) = [[1, 1], [3], [4, 4]]`
|
||||
-/
|
||||
noncomputable def splitOnP (p : α → Bool) (l : List α) : List (List α) :=
|
||||
splitOnPPrepend p l []
|
||||
|
||||
@[deprecated splitOnPPrepend (since := "2026-02-26")]
|
||||
noncomputable def splitOnP.go (p : α → Bool) (l acc : List α) : List (List α) :=
|
||||
splitOnPPrepend p l acc
|
||||
|
||||
/-- Tail recursive version of {name}`splitOnP`. -/
|
||||
@[inline]
|
||||
def splitOnPTR (p : α → Bool) (l : List α) : List (List α) := go l #[] #[] where
|
||||
@[specialize] go : List α → Array α → Array (List α) → List (List α)
|
||||
| [], acc, r => r.toListAppend [acc.toList]
|
||||
| a :: t, acc, r => bif p a then go t #[] (r.push acc.toList) else go t (acc.push a) r
|
||||
|
||||
@[csimp] theorem splitOnP_eq_splitOnPTR : @splitOnP = @splitOnPTR := by
|
||||
funext α P l
|
||||
simp only [splitOnPTR]
|
||||
suffices ∀ xs acc r,
|
||||
splitOnPTR.go P xs acc r = r.toList ++ splitOnPPrepend P xs acc.toList.reverse from
|
||||
(this l #[] #[]).symm
|
||||
intro xs acc r
|
||||
induction xs generalizing acc r with
|
||||
| nil => simp [splitOnPPrepend, splitOnPTR.go]
|
||||
| cons x xs IH => cases h : P x <;> simp [splitOnPPrepend, splitOnPTR.go, *]
|
||||
|
||||
/--
|
||||
Split a list at every occurrence of a separator element. The separators are not in the result.
|
||||
|
||||
Examples:
|
||||
* {lean}`[1, 1, 2, 3, 2, 4, 4].splitOn 2 = [[1, 1], [3], [4, 4]]`
|
||||
-/
|
||||
@[inline] def splitOn [BEq α] (a : α) (as : List α) : List (List α) :=
|
||||
as.splitOnP (· == a)
|
||||
|
||||
end List
|
||||
208
src/Init/Data/List/SplitOn/Lemmas.lean
Normal file
208
src/Init/Data/List/SplitOn/Lemmas.lean
Normal file
@@ -0,0 +1,208 @@
|
||||
/-
|
||||
Copyright (c) 2014 Parikshit Khanna. All rights reserved.
|
||||
Released under Apache 2.0 license as described in the file LICENSE.
|
||||
Authors: Parikshit Khanna, Jeremy Avigad, Leonardo de Moura, Floris van Doorn, Mario Carneiro, Markus Himmel
|
||||
-/
|
||||
module
|
||||
|
||||
prelude
|
||||
public import Init.Data.List.SplitOn.Basic
|
||||
import all Init.Data.List.SplitOn.Basic
|
||||
import Init.Data.List.Nat.Modify
|
||||
import Init.ByCases
|
||||
|
||||
public section
|
||||
|
||||
namespace List
|
||||
|
||||
variable {p : α → Bool} {xs : List α} {ls : List (List α)}
|
||||
|
||||
@[simp]
|
||||
theorem splitOn_nil [BEq α] (a : α) : [].splitOn a = [[]] :=
|
||||
(rfl)
|
||||
|
||||
@[simp]
|
||||
theorem splitOnP_nil : [].splitOnP p = [[]] :=
|
||||
(rfl)
|
||||
|
||||
@[simp]
|
||||
theorem splitOnPPrepend_ne_nil (p : α → Bool) (xs acc : List α) : splitOnPPrepend p xs acc ≠ [] := by
|
||||
fun_induction splitOnPPrepend <;> simp_all
|
||||
|
||||
@[deprecated splitOnPPrepend_ne_nil (since := "2026-02-26")]
|
||||
theorem splitOnP.go_ne_nil (p : α → Bool) (xs acc : List α) : splitOnPPrepend p xs acc ≠ [] :=
|
||||
splitOnPPrepend_ne_nil p xs acc
|
||||
|
||||
@[simp] theorem splitOnPPrepend_nil {acc : List α} : splitOnPPrepend p [] acc = [acc.reverse] := (rfl)
|
||||
@[simp] theorem splitOnPPrepend_nil_right : splitOnPPrepend p xs [] = splitOnP p xs := (rfl)
|
||||
theorem splitOnP_eq_splitOnPPrepend : splitOnP p xs = splitOnPPrepend p xs [] := (rfl)
|
||||
|
||||
theorem splitOnPPrepend_cons_eq_if {x : α} {xs acc : List α} :
|
||||
splitOnPPrepend p (x :: xs) acc =
|
||||
if p x then acc.reverse :: splitOnP p xs else splitOnPPrepend p xs (x :: acc) := by
|
||||
simp [splitOnPPrepend]
|
||||
|
||||
theorem splitOnPPrepend_cons_pos {p : α → Bool} {a : α} {l acc : List α} (h : p a) :
|
||||
splitOnPPrepend p (a :: l) acc = acc.reverse :: splitOnP p l := by
|
||||
simp [splitOnPPrepend, h]
|
||||
|
||||
theorem splitOnPPrepend_cons_neg {p : α → Bool} {a : α} {l acc : List α} (h : p a = false) :
|
||||
splitOnPPrepend p (a :: l) acc = splitOnPPrepend p l (a :: acc) := by
|
||||
simp [splitOnPPrepend, h]
|
||||
|
||||
theorem splitOnP_cons_eq_if_splitOnPPrepend {x : α} {xs : List α} :
|
||||
splitOnP p (x :: xs) = if p x then [] :: splitOnP p xs else splitOnPPrepend p xs [x] := by
|
||||
simp [splitOnPPrepend_cons_eq_if, ← splitOnPPrepend_nil_right]
|
||||
|
||||
theorem splitOnPPrepend_eq_modifyHead {xs acc : List α} :
|
||||
splitOnPPrepend p xs acc = modifyHead (acc.reverse ++ ·) (splitOnP p xs) := by
|
||||
induction xs generalizing acc with
|
||||
| nil => simp
|
||||
| cons hd tl ih =>
|
||||
simp [splitOnPPrepend_cons_eq_if, splitOnP_cons_eq_if_splitOnPPrepend, ih]
|
||||
split <;> simp <;> congr
|
||||
|
||||
@[deprecated splitOnPPrepend_eq_modifyHead (since := "2026-02-26")]
|
||||
theorem splitOnP.go_acc {xs acc : List α} :
|
||||
splitOnPPrepend p xs acc = modifyHead (acc.reverse ++ ·) (splitOnP p xs) :=
|
||||
splitOnPPrepend_eq_modifyHead
|
||||
|
||||
@[simp]
|
||||
theorem splitOnP_ne_nil (p : α → Bool) (xs : List α) : xs.splitOnP p ≠ [] :=
|
||||
splitOnPPrepend_ne_nil p xs []
|
||||
|
||||
theorem splitOnP_cons_eq_if_modifyHead (x : α) (xs : List α) :
|
||||
(x :: xs).splitOnP p =
|
||||
if p x then [] :: xs.splitOnP p else (xs.splitOnP p).modifyHead (cons x) := by
|
||||
simp [splitOnP_cons_eq_if_splitOnPPrepend, splitOnPPrepend_eq_modifyHead]
|
||||
|
||||
@[deprecated splitOnP_cons_eq_if_modifyHead (since := "2026-02-26")]
|
||||
theorem splitOnP_cons (x : α) (xs : List α) :
|
||||
(x :: xs).splitOnP p =
|
||||
if p x then [] :: xs.splitOnP p else (xs.splitOnP p).modifyHead (cons x) :=
|
||||
splitOnP_cons_eq_if_modifyHead x xs
|
||||
|
||||
/-- The original list `L` can be recovered by flattening the lists produced by `splitOnP p L`,
|
||||
interspersed with the elements `L.filter p`. -/
|
||||
theorem splitOnP_spec (as : List α) :
|
||||
flatten (zipWith (· ++ ·) (splitOnP p as) (((as.filter p).map fun x => [x]) ++ [[]])) = as := by
|
||||
induction as with
|
||||
| nil => simp
|
||||
| cons a as' ih =>
|
||||
rw [splitOnP_cons_eq_if_modifyHead]
|
||||
split <;> simp [*, flatten_zipWith, splitOnP_ne_nil]
|
||||
where
|
||||
flatten_zipWith {xs ys : List (List α)} {a : α} (hxs : xs ≠ []) (hys : ys ≠ []) :
|
||||
flatten (zipWith (fun x x_1 => x ++ x_1) (modifyHead (cons a) xs) ys) =
|
||||
a :: flatten (zipWith (fun x x_1 => x ++ x_1) xs ys) := by
|
||||
cases xs <;> cases ys <;> simp_all
|
||||
|
||||
/-- If no element satisfies `p` in the list `xs`, then `xs.splitOnP p = [xs]` -/
|
||||
theorem splitOnP_eq_singleton (h : ∀ x ∈ xs, p x = false) : xs.splitOnP p = [xs] := by
|
||||
induction xs with
|
||||
| nil => simp
|
||||
| cons hd tl ih =>
|
||||
simp only [mem_cons, forall_eq_or_imp] at h
|
||||
simp [splitOnP_cons_eq_if_modifyHead, h.1, ih h.2]
|
||||
|
||||
@[deprecated splitOnP_eq_singleton (since := "2026-02-26")]
|
||||
theorem splitOnP_eq_single (h : ∀ x ∈ xs, p x = false) : xs.splitOnP p = [xs] :=
|
||||
splitOnP_eq_singleton h
|
||||
|
||||
/-- When a list of the form `[...xs, sep, ...as]` is split at the `sep` element satisfying `p`,
|
||||
the result is the concatenation of `splitOnP` called on `xs` and `as` -/
|
||||
theorem splitOnP_append_cons (xs as : List α) {sep : α} (hsep : p sep) :
|
||||
(xs ++ sep :: as).splitOnP p = List.splitOnP p xs ++ List.splitOnP p as := by
|
||||
induction xs with
|
||||
| nil => simp [splitOnP_cons_eq_if_modifyHead, hsep]
|
||||
| cons hd tl ih =>
|
||||
obtain ⟨hd1, tl1, h1'⟩ := List.exists_cons_of_ne_nil (List.splitOnP_ne_nil (p := p) (xs := tl))
|
||||
by_cases hPh : p hd <;> simp [splitOnP_cons_eq_if_modifyHead, *]
|
||||
|
||||
/-- When a list of the form `[...xs, sep, ...as]` is split on `p`, the first element is `xs`,
|
||||
assuming no element in `xs` satisfies `p` but `sep` does satisfy `p` -/
|
||||
theorem splitOnP_append_cons_of_forall_mem (h : ∀ x ∈ xs, p x = false) (sep : α)
|
||||
(hsep : p sep = true) (as : List α) : (xs ++ sep :: as).splitOnP p = xs :: as.splitOnP p := by
|
||||
rw [splitOnP_append_cons xs as hsep, splitOnP_eq_singleton h, singleton_append]
|
||||
|
||||
@[deprecated splitOnP_append_cons_of_forall_mem (since := "2026-02-26")]
|
||||
theorem splitOnP_first (h : ∀ x ∈ xs, p x = false) (sep : α)
|
||||
(hsep : p sep = true) (as : List α) : (xs ++ sep :: as).splitOnP p = xs :: as.splitOnP p :=
|
||||
splitOnP_append_cons_of_forall_mem h sep hsep as
|
||||
|
||||
theorem splitOn_eq_splitOnP [BEq α] {x : α} {xs : List α} : xs.splitOn x = xs.splitOnP (· == x) :=
|
||||
(rfl)
|
||||
|
||||
@[simp]
|
||||
theorem splitOn_ne_nil [BEq α] (a : α) (xs : List α) : xs.splitOn a ≠ [] := by
|
||||
simp [splitOn_eq_splitOnP]
|
||||
|
||||
theorem splitOn_cons_eq_if_modifyHead [BEq α] {a : α} (x : α) (xs : List α) :
|
||||
(x :: xs).splitOn a =
|
||||
if x == a then [] :: xs.splitOn a else (xs.splitOn a).modifyHead (cons x) := by
|
||||
simpa [splitOn_eq_splitOnP] using splitOnP_cons_eq_if_modifyHead ..
|
||||
|
||||
/-- If no element satisfies `p` in the list `xs`, then `xs.splitOnP p = [xs]` -/
|
||||
theorem splitOn_eq_singleton_of_beq_eq_false [BEq α] {a : α} (h : ∀ x ∈ xs, (x == a) = false) :
|
||||
xs.splitOn a = [xs] := by
|
||||
simpa [splitOn_eq_splitOnP] using splitOnP_eq_singleton h
|
||||
|
||||
theorem splitOn_eq_singleton [BEq α] [LawfulBEq α] {a : α} (h : a ∉ xs) :
|
||||
xs.splitOn a = [xs] :=
|
||||
splitOn_eq_singleton_of_beq_eq_false
|
||||
(fun _ hb => beq_eq_false_iff_ne.2 (fun hab => absurd hb (hab ▸ h)))
|
||||
|
||||
/-- When a list of the form `[...xs, sep, ...as]` is split at the `sep` element equal to `a`,
|
||||
the result is the concatenation of `splitOnP` called on `xs` and `as` -/
|
||||
theorem splitOn_append_cons_of_beq [BEq α] {a : α} (xs as : List α) {sep : α} (hsep : sep == a) :
|
||||
(xs ++ sep :: as).splitOn a = List.splitOn a xs ++ List.splitOn a as := by
|
||||
simpa [splitOn_eq_splitOnP] using splitOnP_append_cons (p := (· == a)) _ _ hsep
|
||||
|
||||
/-- When a list of the form `[...xs, sep, ...as]` is split at `a`,
|
||||
the result is the concatenation of `splitOnP` called on `xs` and `as` -/
|
||||
theorem splitOn_append_cons_self [BEq α] [ReflBEq α] {a : α} (xs as : List α) :
|
||||
(xs ++ a :: as).splitOn a = List.splitOn a xs ++ List.splitOn a as :=
|
||||
splitOn_append_cons_of_beq _ _ (BEq.refl _)
|
||||
|
||||
/-- When a list of the form `[...xs, sep, ...as]` is split at `a`, the first element is `xs`,
|
||||
assuming no element in `xs` is equal to `a` but `sep` is equal to `a`. -/
|
||||
theorem splitOn_append_cons_of_forall_mem_beq_eq_false [BEq α] {a : α}
|
||||
(h : ∀ x ∈ xs, (x == a) = false) (sep : α)
|
||||
(hsep : sep == a) (as : List α) : (xs ++ sep :: as).splitOn a = xs :: as.splitOn a := by
|
||||
simpa [splitOn_eq_splitOnP] using splitOnP_append_cons_of_forall_mem h _ hsep _
|
||||
|
||||
/-- When a list of the form `[...xs, a, ...as]` is split at `a`, the first element is `xs`,
|
||||
assuming no element in `xs` is equal to `a`. -/
|
||||
theorem splitOn_append_cons_self_of_not_mem [BEq α] [LawfulBEq α] {a : α}
|
||||
(h : a ∉ xs) (as : List α) : (xs ++ a :: as).splitOn a = xs :: as.splitOn a :=
|
||||
splitOn_append_cons_of_forall_mem_beq_eq_false
|
||||
(fun b hb => beq_eq_false_iff_ne.2 fun hab => absurd hb (hab ▸ h)) _ (by simp) _
|
||||
|
||||
/-- `intercalate [x]` is the left inverse of `splitOn x` -/
|
||||
@[simp]
|
||||
theorem intercalate_splitOn [BEq α] [LawfulBEq α] (x : α) : [x].intercalate (xs.splitOn x) = xs := by
|
||||
induction xs with
|
||||
| nil => simp
|
||||
| cons hd tl ih =>
|
||||
simp only [splitOn_cons_eq_if_modifyHead, beq_iff_eq]
|
||||
split
|
||||
· simp_all [intercalate_cons_of_ne_nil, splitOn_ne_nil]
|
||||
· have hsp := splitOn_ne_nil x tl
|
||||
generalize splitOn x tl = ls at *
|
||||
cases ls <;> simp_all
|
||||
|
||||
/-- `splitOn x` is the left inverse of `intercalate [x]`, on the domain
|
||||
consisting of each nonempty list of lists `ls` whose elements do not contain `x` -/
|
||||
theorem splitOn_intercalate [BEq α] [LawfulBEq α] (x : α) (hx : ∀ l ∈ ls, x ∉ l) (hls : ls ≠ []) :
|
||||
([x].intercalate ls).splitOn x = ls := by
|
||||
induction ls with
|
||||
| nil => simp at hls
|
||||
| cons hd tl ih =>
|
||||
simp only [mem_cons, forall_eq_or_imp] at ⊢ hx
|
||||
match tl with
|
||||
| [] => simpa using splitOn_eq_singleton hx.1
|
||||
| t::tl =>
|
||||
simp only [intercalate_cons_cons, append_assoc, cons_append, nil_append]
|
||||
rw [splitOn_append_cons_self_of_not_mem hx.1, ih hx.2 (by simp)]
|
||||
|
||||
end List
|
||||
@@ -32,8 +32,12 @@ open Nat
|
||||
section isPrefixOf
|
||||
variable [BEq α]
|
||||
|
||||
@[simp, grind =] theorem isPrefixOf_cons₂_self [LawfulBEq α] {a : α} :
|
||||
isPrefixOf (a::as) (a::bs) = isPrefixOf as bs := by simp [isPrefixOf_cons₂]
|
||||
@[simp, grind =] theorem isPrefixOf_cons_cons_self [LawfulBEq α] {a : α} :
|
||||
isPrefixOf (a::as) (a::bs) = isPrefixOf as bs := by simp [isPrefixOf_cons_cons]
|
||||
|
||||
@[deprecated isPrefixOf_cons_cons_self (since := "2026-02-26")]
|
||||
theorem isPrefixOf_cons₂_self [LawfulBEq α] {a : α} :
|
||||
isPrefixOf (a::as) (a::bs) = isPrefixOf as bs := isPrefixOf_cons_cons_self
|
||||
|
||||
@[simp] theorem isPrefixOf_length_pos_nil {l : List α} (h : 0 < l.length) : isPrefixOf l [] = false := by
|
||||
cases l <;> simp_all [isPrefixOf]
|
||||
@@ -45,7 +49,7 @@ variable [BEq α]
|
||||
| cons _ _ ih =>
|
||||
cases n
|
||||
· simp
|
||||
· simp [replicate_succ, isPrefixOf_cons₂, ih, Nat.succ_le_succ_iff, Bool.and_left_comm]
|
||||
· simp [replicate_succ, isPrefixOf_cons_cons, ih, Nat.succ_le_succ_iff, Bool.and_left_comm]
|
||||
|
||||
end isPrefixOf
|
||||
|
||||
@@ -169,18 +173,18 @@ theorem subset_replicate {n : Nat} {a : α} {l : List α} (h : n ≠ 0) : l ⊆
|
||||
|
||||
@[simp, grind ←] theorem Sublist.refl : ∀ l : List α, l <+ l
|
||||
| [] => .slnil
|
||||
| a :: l => (Sublist.refl l).cons₂ a
|
||||
| a :: l => (Sublist.refl l).cons_cons a
|
||||
|
||||
theorem Sublist.trans {l₁ l₂ l₃ : List α} (h₁ : l₁ <+ l₂) (h₂ : l₂ <+ l₃) : l₁ <+ l₃ := by
|
||||
induction h₂ generalizing l₁ with
|
||||
| slnil => exact h₁
|
||||
| cons _ _ IH => exact (IH h₁).cons _
|
||||
| @cons₂ l₂ _ a _ IH =>
|
||||
| @cons_cons l₂ _ a _ IH =>
|
||||
generalize e : a :: l₂ = l₂' at h₁
|
||||
match h₁ with
|
||||
| .slnil => apply nil_sublist
|
||||
| .cons a' h₁' => cases e; apply (IH h₁').cons
|
||||
| .cons₂ a' h₁' => cases e; apply (IH h₁').cons₂
|
||||
| .cons_cons a' h₁' => cases e; apply (IH h₁').cons_cons
|
||||
|
||||
instance : Trans (@Sublist α) Sublist Sublist := ⟨Sublist.trans⟩
|
||||
|
||||
@@ -193,23 +197,23 @@ theorem sublist_of_cons_sublist : a :: l₁ <+ l₂ → l₁ <+ l₂ :=
|
||||
|
||||
@[simp, grind =]
|
||||
theorem cons_sublist_cons : a :: l₁ <+ a :: l₂ ↔ l₁ <+ l₂ :=
|
||||
⟨fun | .cons _ s => sublist_of_cons_sublist s | .cons₂ _ s => s, .cons₂ _⟩
|
||||
⟨fun | .cons _ s => sublist_of_cons_sublist s | .cons_cons _ s => s, .cons_cons _⟩
|
||||
|
||||
theorem sublist_or_mem_of_sublist (h : l <+ l₁ ++ a :: l₂) : l <+ l₁ ++ l₂ ∨ a ∈ l := by
|
||||
induction l₁ generalizing l with
|
||||
| nil => match h with
|
||||
| .cons _ h => exact .inl h
|
||||
| .cons₂ _ h => exact .inr (.head ..)
|
||||
| .cons_cons _ h => exact .inr (.head ..)
|
||||
| cons b l₁ IH =>
|
||||
match h with
|
||||
| .cons _ h => exact (IH h).imp_left (Sublist.cons _)
|
||||
| .cons₂ _ h => exact (IH h).imp (Sublist.cons₂ _) (.tail _)
|
||||
| .cons_cons _ h => exact (IH h).imp (Sublist.cons_cons _) (.tail _)
|
||||
|
||||
@[grind →] theorem Sublist.subset : l₁ <+ l₂ → l₁ ⊆ l₂
|
||||
| .slnil, _, h => h
|
||||
| .cons _ s, _, h => .tail _ (s.subset h)
|
||||
| .cons₂ .., _, .head .. => .head ..
|
||||
| .cons₂ _ s, _, .tail _ h => .tail _ (s.subset h)
|
||||
| .cons_cons .., _, .head .. => .head ..
|
||||
| .cons_cons _ s, _, .tail _ h => .tail _ (s.subset h)
|
||||
|
||||
protected theorem Sublist.mem (hx : a ∈ l₁) (hl : l₁ <+ l₂) : a ∈ l₂ :=
|
||||
hl.subset hx
|
||||
@@ -245,7 +249,7 @@ theorem eq_nil_of_sublist_nil {l : List α} (s : l <+ []) : l = [] :=
|
||||
theorem Sublist.length_le : l₁ <+ l₂ → length l₁ ≤ length l₂
|
||||
| .slnil => Nat.le_refl 0
|
||||
| .cons _l s => le_succ_of_le (length_le s)
|
||||
| .cons₂ _ s => succ_le_succ (length_le s)
|
||||
| .cons_cons _ s => succ_le_succ (length_le s)
|
||||
|
||||
grind_pattern Sublist.length_le => l₁ <+ l₂, length l₁
|
||||
grind_pattern Sublist.length_le => l₁ <+ l₂, length l₂
|
||||
@@ -253,7 +257,7 @@ grind_pattern Sublist.length_le => l₁ <+ l₂, length l₂
|
||||
theorem Sublist.eq_of_length : l₁ <+ l₂ → length l₁ = length l₂ → l₁ = l₂
|
||||
| .slnil, _ => rfl
|
||||
| .cons a s, h => nomatch Nat.not_lt.2 s.length_le (h ▸ lt_succ_self _)
|
||||
| .cons₂ a s, h => by rw [s.eq_of_length (succ.inj h)]
|
||||
| .cons_cons a s, h => by rw [s.eq_of_length (succ.inj h)]
|
||||
|
||||
theorem Sublist.eq_of_length_le (s : l₁ <+ l₂) (h : length l₂ ≤ length l₁) : l₁ = l₂ :=
|
||||
s.eq_of_length <| Nat.le_antisymm s.length_le h
|
||||
@@ -275,7 +279,7 @@ grind_pattern tail_sublist => tail l <+ _
|
||||
protected theorem Sublist.tail : ∀ {l₁ l₂ : List α}, l₁ <+ l₂ → tail l₁ <+ tail l₂
|
||||
| _, _, slnil => .slnil
|
||||
| _, _, Sublist.cons _ h => (tail_sublist _).trans h
|
||||
| _, _, Sublist.cons₂ _ h => h
|
||||
| _, _, Sublist.cons_cons _ h => h
|
||||
|
||||
@[grind →]
|
||||
theorem Sublist.of_cons_cons {l₁ l₂ : List α} {a b : α} (h : a :: l₁ <+ b :: l₂) : l₁ <+ l₂ :=
|
||||
@@ -287,8 +291,8 @@ protected theorem Sublist.map (f : α → β) {l₁ l₂} (s : l₁ <+ l₂) : m
|
||||
| slnil => simp
|
||||
| cons a s ih =>
|
||||
simpa using cons (f a) ih
|
||||
| cons₂ a s ih =>
|
||||
simpa using cons₂ (f a) ih
|
||||
| cons_cons a s ih =>
|
||||
simpa using cons_cons (f a) ih
|
||||
|
||||
grind_pattern Sublist.map => l₁ <+ l₂, map f l₁
|
||||
grind_pattern Sublist.map => l₁ <+ l₂, map f l₂
|
||||
@@ -338,7 +342,7 @@ theorem sublist_filterMap_iff {l₁ : List β} {f : α → Option β} :
|
||||
cases h with
|
||||
| cons _ h =>
|
||||
exact ⟨l', h, rfl⟩
|
||||
| cons₂ _ h =>
|
||||
| cons_cons _ h =>
|
||||
rename_i l'
|
||||
exact ⟨l', h, by simp_all⟩
|
||||
· constructor
|
||||
@@ -347,10 +351,10 @@ theorem sublist_filterMap_iff {l₁ : List β} {f : α → Option β} :
|
||||
| cons _ h =>
|
||||
obtain ⟨l', s, rfl⟩ := ih.1 h
|
||||
exact ⟨l', Sublist.cons a s, rfl⟩
|
||||
| cons₂ _ h =>
|
||||
| cons_cons _ h =>
|
||||
rename_i l'
|
||||
obtain ⟨l', s, rfl⟩ := ih.1 h
|
||||
refine ⟨a :: l', Sublist.cons₂ a s, ?_⟩
|
||||
refine ⟨a :: l', Sublist.cons_cons a s, ?_⟩
|
||||
rwa [filterMap_cons_some]
|
||||
· rintro ⟨l', h, rfl⟩
|
||||
replace h := h.filterMap f
|
||||
@@ -369,7 +373,7 @@ theorem sublist_filter_iff {l₁ : List α} {p : α → Bool} :
|
||||
|
||||
theorem sublist_append_left : ∀ l₁ l₂ : List α, l₁ <+ l₁ ++ l₂
|
||||
| [], _ => nil_sublist _
|
||||
| _ :: l₁, l₂ => (sublist_append_left l₁ l₂).cons₂ _
|
||||
| _ :: l₁, l₂ => (sublist_append_left l₁ l₂).cons_cons _
|
||||
|
||||
grind_pattern sublist_append_left => Sublist, l₁ ++ l₂
|
||||
|
||||
@@ -382,7 +386,7 @@ grind_pattern sublist_append_right => Sublist, l₁ ++ l₂
|
||||
@[simp, grind =] theorem singleton_sublist {a : α} {l} : [a] <+ l ↔ a ∈ l := by
|
||||
refine ⟨fun h => h.subset (mem_singleton_self _), fun h => ?_⟩
|
||||
obtain ⟨_, _, rfl⟩ := append_of_mem h
|
||||
exact ((nil_sublist _).cons₂ _).trans (sublist_append_right ..)
|
||||
exact ((nil_sublist _).cons_cons _).trans (sublist_append_right ..)
|
||||
|
||||
@[simp] theorem sublist_append_of_sublist_left (s : l <+ l₁) : l <+ l₁ ++ l₂ :=
|
||||
s.trans <| sublist_append_left ..
|
||||
@@ -404,7 +408,7 @@ theorem Sublist.append_left : l₁ <+ l₂ → ∀ l, l ++ l₁ <+ l ++ l₂ :=
|
||||
theorem Sublist.append_right : l₁ <+ l₂ → ∀ l, l₁ ++ l <+ l₂ ++ l
|
||||
| .slnil, _ => Sublist.refl _
|
||||
| .cons _ h, _ => (h.append_right _).cons _
|
||||
| .cons₂ _ h, _ => (h.append_right _).cons₂ _
|
||||
| .cons_cons _ h, _ => (h.append_right _).cons_cons _
|
||||
|
||||
theorem Sublist.append (hl : l₁ <+ l₂) (hr : r₁ <+ r₂) : l₁ ++ r₁ <+ l₂ ++ r₂ :=
|
||||
(hl.append_right _).trans ((append_sublist_append_left _).2 hr)
|
||||
@@ -418,10 +422,10 @@ theorem sublist_cons_iff {a : α} {l l'} :
|
||||
· intro h
|
||||
cases h with
|
||||
| cons _ h => exact Or.inl h
|
||||
| cons₂ _ h => exact Or.inr ⟨_, rfl, h⟩
|
||||
| cons_cons _ h => exact Or.inr ⟨_, rfl, h⟩
|
||||
· rintro (h | ⟨r, rfl, h⟩)
|
||||
· exact h.cons _
|
||||
· exact h.cons₂ _
|
||||
· exact h.cons_cons _
|
||||
|
||||
@[grind =]
|
||||
theorem cons_sublist_iff {a : α} {l l'} :
|
||||
@@ -435,7 +439,7 @@ theorem cons_sublist_iff {a : α} {l l'} :
|
||||
| cons _ w =>
|
||||
obtain ⟨r₁, r₂, rfl, h₁, h₂⟩ := ih.1 w
|
||||
exact ⟨a' :: r₁, r₂, by simp, mem_cons_of_mem a' h₁, h₂⟩
|
||||
| cons₂ _ w =>
|
||||
| cons_cons _ w =>
|
||||
exact ⟨[a], l', by simp, mem_singleton_self _, w⟩
|
||||
· rintro ⟨r₁, r₂, w, h₁, h₂⟩
|
||||
rw [w, ← singleton_append]
|
||||
@@ -458,7 +462,7 @@ theorem sublist_append_iff {l : List α} :
|
||||
| cons _ w =>
|
||||
obtain ⟨l₁, l₂, rfl, w₁, w₂⟩ := ih.1 w
|
||||
exact ⟨l₁, l₂, rfl, Sublist.cons r w₁, w₂⟩
|
||||
| cons₂ _ w =>
|
||||
| cons_cons _ w =>
|
||||
rename_i l
|
||||
obtain ⟨l₁, l₂, rfl, w₁, w₂⟩ := ih.1 w
|
||||
refine ⟨r :: l₁, l₂, by simp, cons_sublist_cons.mpr w₁, w₂⟩
|
||||
@@ -466,9 +470,9 @@ theorem sublist_append_iff {l : List α} :
|
||||
cases w₁ with
|
||||
| cons _ w₁ =>
|
||||
exact Sublist.cons _ (Sublist.append w₁ w₂)
|
||||
| cons₂ _ w₁ =>
|
||||
| cons_cons _ w₁ =>
|
||||
rename_i l
|
||||
exact Sublist.cons₂ _ (Sublist.append w₁ w₂)
|
||||
exact Sublist.cons_cons _ (Sublist.append w₁ w₂)
|
||||
|
||||
theorem append_sublist_iff {l₁ l₂ : List α} :
|
||||
l₁ ++ l₂ <+ r ↔ ∃ r₁ r₂, r = r₁ ++ r₂ ∧ l₁ <+ r₁ ∧ l₂ <+ r₂ := by
|
||||
@@ -516,7 +520,7 @@ theorem Sublist.middle {l : List α} (h : l <+ l₁ ++ l₂) (a : α) : l <+ l
|
||||
theorem Sublist.reverse : l₁ <+ l₂ → l₁.reverse <+ l₂.reverse
|
||||
| .slnil => Sublist.refl _
|
||||
| .cons _ h => by rw [reverse_cons]; exact sublist_append_of_sublist_left h.reverse
|
||||
| .cons₂ _ h => by rw [reverse_cons, reverse_cons]; exact h.reverse.append_right _
|
||||
| .cons_cons _ h => by rw [reverse_cons, reverse_cons]; exact h.reverse.append_right _
|
||||
|
||||
@[simp, grind =] theorem reverse_sublist : l₁.reverse <+ l₂.reverse ↔ l₁ <+ l₂ :=
|
||||
⟨fun h => l₁.reverse_reverse ▸ l₂.reverse_reverse ▸ h.reverse, Sublist.reverse⟩
|
||||
@@ -558,7 +562,7 @@ theorem sublist_replicate_iff : l <+ replicate m a ↔ ∃ n, n ≤ m ∧ l = re
|
||||
obtain ⟨n, le, rfl⟩ := ih.1 (sublist_of_cons_sublist w)
|
||||
obtain rfl := (mem_replicate.1 (mem_of_cons_sublist w)).2
|
||||
exact ⟨n+1, Nat.add_le_add_right le 1, rfl⟩
|
||||
| cons₂ _ w =>
|
||||
| cons_cons _ w =>
|
||||
obtain ⟨n, le, rfl⟩ := ih.1 w
|
||||
refine ⟨n+1, Nat.add_le_add_right le 1, by simp [replicate_succ]⟩
|
||||
· rintro ⟨n, le, w⟩
|
||||
@@ -644,7 +648,7 @@ theorem flatten_sublist_iff {L : List (List α)} {l} :
|
||||
cases h_sub
|
||||
case cons h_sub =>
|
||||
exact isSublist_iff_sublist.mpr h_sub
|
||||
case cons₂ =>
|
||||
case cons_cons =>
|
||||
contradiction
|
||||
|
||||
instance [DecidableEq α] (l₁ l₂ : List α) : Decidable (l₁ <+ l₂) :=
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user